Datasets:

Columns in each row (type and observed value/length range):

  function_name    string, lengths 1 to 63
  docstring        string, lengths 50 to 5.89k
  masked_code      string, lengths 50 to 882k
  implementation   string, lengths 169 to 12.9k
  start_line       int32, values 1 to 14.6k
  end_line         int32, values 16 to 14.6k
  file_content     string, lengths 274 to 882k
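The schema suggests a function-infilling task: masked_code is a source file with one function removed, docstring describes that function, implementation is the removed body, and start_line/end_line appear to locate it within file_content. A minimal sketch of inspecting one row with the Hugging Face datasets library; the dataset identifier is a hypothetical placeholder, and the 1-indexed, inclusive line-span interpretation is an assumption:

from datasets import load_dataset

# "org/masked-function-dataset" is a hypothetical placeholder identifier.
ds = load_dataset("org/masked-function-dataset", split="train")
row = ds[0]

print(row["function_name"])                # e.g. read_namespaced_job_status
print(row["docstring"][:200])              # docstring of the masked function
print(row["start_line"], row["end_line"])  # assumed span of the masked body

# Assuming start_line/end_line are 1-indexed and inclusive into file_content,
# the reference implementation can be cut back out of the full file:
lines = row["file_content"].splitlines()
gold_span = "\n".join(lines[row["start_line"] - 1 : row["end_line"]])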
Example row:

function_name:
  read_namespaced_job_status

docstring:
  read status of the specified Job
  This method makes a synchronous HTTP request by default. To make an
  asynchronous HTTP request, please pass async_req=True
  >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
  >>> result = thread.get()
  :param async_req bool
  :param str name: name of the Job (required)
  :param str namespace: object name and auth scope, such as for teams and projects (required)
  :param str pretty: If 'true', then the output is pretty printed.
  :return: V1Job
           If the method is called asynchronously, returns the request thread.
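This docstring is the standard swagger-generated documentation for BatchV1Api.read_namespaced_job_status. As a usage sketch (cluster access, the job name "my-job", and the "default" namespace are assumptions), the method is typically reached through the official Kubernetes Python client:

from kubernetes import client, config

config.load_kube_config()          # or config.load_incluster_config() inside a pod
api = client.BatchV1Api()

# Synchronous call (the default)
job = api.read_namespaced_job_status(name="my-job", namespace="default")
print(job.status.active, job.status.succeeded, job.status.failed)

# Asynchronous call, as shown in the docstring
thread = api.read_namespaced_job_status(name="my-job", namespace="default", async_req=True)
job = thread.get()

The masked_code value follows: the full generated BatchV1Api module with this method's body replaced by a "# MASKED:" marker (the original file's newlines are collapsed in this dump).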
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) # MASKED: read_namespaced_job_status function (lines 1108-1129) def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data
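A hedged sketch of the unmasked read_namespaced_job_status in use, following the sync/async pattern its own docstring documents. Cluster access and the Job identifiers ('demo-job', 'default') are assumptions for illustration.

from kubernetes import client, config

# Hedged sketch: 'demo-job' and 'default' are hypothetical placeholders.
config.load_kube_config()
api = client.BatchV1Api()

# Synchronous call: returns a V1Job whose .status carries the Job counters.
job = api.read_namespaced_job_status(name="demo-job", namespace="default")
print(job.status.active, job.status.succeeded, job.status.failed)

# Asynchronous call (async_req=True): returns a request thread; .get() blocks
# until the V1Job response is available, as the docstring example shows.
thread = api.read_namespaced_job_status("demo-job", "default", async_req=True)
job = thread.get()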
1108
1129
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
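As a usage sketch for the namespaced list (same package and cluster assumptions as the previous example), the selectors are evaluated by the API server, so only matching Jobs cross the wire:

    from kubernetes import client, config

    config.load_kube_config()
    api = client.BatchV1Api()

    job_list = api.list_namespaced_job(
        namespace="default",
        label_selector="app=batch-worker",  # hypothetical label; labelSelector param
        timeout_seconds=30,                 # bounds the server-side call
    )
    for job in job_list.items:
        print(job.metadata.name, job.status.succeeded)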
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
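The implementation that follows negotiates `application/json-patch+json`, `application/merge-patch+json`, or `application/strategic-merge-patch+json`, so a plain dict body works as a merge-style patch. A sketch; the Job name and the patched field are illustrative:

    from kubernetes import client, config

    config.load_kube_config()
    api = client.BatchV1Api()

    # For batch/v1 Jobs only a few spec fields (e.g. parallelism) are
    # mutable after creation, so the patch below stays within those.
    patch = {"spec": {"parallelism": 2}}
    job = api.patch_namespaced_job(
        name="example-job",  # hypothetical Job
        namespace="default",
        body=patch,
    )
    print(job.spec.parallelism)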
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
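A plain read sketch (same assumptions as above); `exact` and `export` only shape the returned object and never mutate the Job:

    from kubernetes import client, config

    config.load_kube_config()
    api = client.BatchV1Api()

    job = api.read_namespaced_job(name="example-job", namespace="default")
    # The response body is deserialized into the V1Job model.
    print(job.metadata.uid)
    print(job.spec.backoff_limit)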
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
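The public wrapper sets `_return_http_data_only=True` before delegating, so calling the `_with_http_info` variant directly is how callers see transport details; in this swagger-codegen style the raw call returns a `(data, status, headers)` tuple. A sketch under that assumption:

    from kubernetes import client, config

    config.load_kube_config()
    api = client.BatchV1Api()

    job, http_status, headers = api.read_namespaced_job_status_with_http_info(
        name="example-job", namespace="default"
    )
    print(http_status)                  # e.g. 200
    print(headers.get("Content-Type"))  # negotiated response type
    print(job.status.active)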
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
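`replace_namespaced_job` and `replace_namespaced_job_status` are full PUTs, so the usual pattern is read-modify-write with a retry when the stored `resourceVersion` has moved on. A sketch; it assumes the `ApiException` type shipped at `kubernetes.client.rest` and touches only a label, which is always mutable:

    from kubernetes import client, config
    from kubernetes.client.rest import ApiException

    config.load_kube_config()
    api = client.BatchV1Api()

    for attempt in range(3):
        job = api.read_namespaced_job(name="example-job", namespace="default")
        job.metadata.labels = dict(job.metadata.labels or {}, reviewed="true")
        try:
            api.replace_namespaced_job(
                name="example-job", namespace="default", body=job
            )
            break
        except ApiException as exc:
            if exc.status != 409:  # retry only optimistic-concurrency conflicts
                raise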
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
read_namespaced_job_status_with_http_info
read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread.
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
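`delete_collection_namespaced_job` removes every Job matching the selectors in a single call and, per the docstring above, returns a V1Status summary rather than the deleted objects. A sketch:

    from kubernetes import client, config

    config.load_kube_config()
    api = client.BatchV1Api()

    status = api.delete_collection_namespaced_job(
        namespace="default",
        label_selector="app=batch-worker",  # hypothetical label
    )
    print(status.status)  # V1Status, not the deleted Jobs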
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
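When deleting a single Job, the propagation policy decides what happens to its Pods: 'Foreground' waits for dependents to be removed first, 'Background' returns immediately, 'Orphan' leaves them running. A sketch passing the policy through V1DeleteOptions:

    from kubernetes import client, config

    config.load_kube_config()
    api = client.BatchV1Api()

    status = api.delete_namespaced_job(
        name="example-job",  # hypothetical Job
        namespace="default",
        body=client.V1DeleteOptions(propagation_policy="Foreground"),
    )
    print(status)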
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data # MASKED: read_namespaced_job_status_with_http_info function (lines 1131-1213) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', 
True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
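A minimal usage sketch (not part of the generated client, names and namespace are illustrative; assumes a reachable cluster and a local kubeconfig) showing the synchronous/asynchronous calling convention and the continue-token pagination described in the docstrings above:

    from kubernetes import client, config

    config.load_kube_config()          # or config.load_incluster_config() inside a pod
    api = client.BatchV1Api()

    # Synchronous call: returns a V1JobList directly.
    jobs = api.list_namespaced_job(namespace="default", limit=50)

    # Asynchronous call: returns a thread; .get() yields the same V1JobList.
    thread = api.list_namespaced_job(namespace="default", async_req=True)
    jobs_async = thread.get()

    # Pagination: resend the continue token with otherwise identical query
    # parameters until the server stops returning one.
    token = jobs.metadata._continue
    while token:
        jobs = api.list_namespaced_job(namespace="default", limit=50, _continue=token)
        token = jobs.metadata._continue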
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
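A hedged sketch (job name and namespace are illustrative) of driving the implementation above through its public wrapper to check whether a Job has finished:

    from kubernetes import client, config

    config.load_kube_config()
    api = client.BatchV1Api()

    # GET /apis/batch/v1/namespaces/{namespace}/jobs/{name}/status -> V1Job
    job = api.read_namespaced_job_status(name="pi", namespace="default")
    if (job.status.succeeded or 0) >= 1:
        print("job completed")
    elif (job.status.failed or 0) >= 1:
        print("job failed")

    # Async variant: returns a thread whose .get() yields the same V1Job.
    thread = api.read_namespaced_job_status(name="pi", namespace="default", async_req=True)
    job = thread.get()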
1131
1213
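A sketch of the watch semantics described in the list docstrings, assuming the kubernetes.watch helper (which wraps watch=True and resourceVersion handling); event types and the 60-second timeout are illustrative:

    from kubernetes import client, config, watch

    config.load_kube_config()
    api = client.BatchV1Api()

    w = watch.Watch()
    for event in w.stream(api.list_namespaced_job, namespace="default", timeout_seconds=60):
        # event["type"] is ADDED, MODIFIED, or DELETED; event["object"] is a V1Job.
        print(event["type"], event["object"].metadata.name)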
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
    def delete_collection_namespaced_job(self, namespace, **kwargs):
        """
        delete collection of Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
        else:
            (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
            return data

    def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
        """
        delete collection of Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_collection_namespaced_job" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")

        collection_formats = {}

        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        query_params = []
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if '_continue' in params:
            query_params.append(('continue', params['_continue']))
        if 'field_selector' in params:
            query_params.append(('fieldSelector', params['field_selector']))
        if 'label_selector' in params:
            query_params.append(('labelSelector', params['label_selector']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'resource_version' in params:
            query_params.append(('resourceVersion', params['resource_version']))
        if 'timeout_seconds' in params:
            query_params.append(('timeoutSeconds', params['timeout_seconds']))
        if 'watch' in params:
            query_params.append(('watch', params['watch']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Status',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
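    # Usage sketch for delete_collection_namespaced_job: a minimal sketch, not
    # part of the generated API surface. It deletes every Job in a namespace
    # matching a label selector; the label `app=batch-demo` is an assumed value.
    #
    #     from kubernetes import client, config
    #     config.load_kube_config()
    #     batch = client.BatchV1Api()
    #     status = batch.delete_collection_namespaced_job(
    #         "default", label_selector="app=batch-demo")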
    def delete_namespaced_job(self, name, namespace, **kwargs):
        """
        delete a Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str name: name of the Job (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param V1DeleteOptions body:
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified. Zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
        else:
            (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
            return data

    def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
        """
        delete a Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str name: name of the Job (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param V1DeleteOptions body:
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified. Zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_job" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")

        collection_formats = {}

        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))
        if 'grace_period_seconds' in params:
            query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
        if 'orphan_dependents' in params:
            query_params.append(('orphanDependents', params['orphan_dependents']))
        if 'propagation_policy' in params:
            query_params.append(('propagationPolicy', params['propagation_policy']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Status',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
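    # Usage sketch for delete_namespaced_job: a minimal sketch, not part of the
    # generated API surface; the Job name and namespace are assumed values.
    # 'Foreground' propagation cascades the delete to the Job's Pods as well.
    #
    #     from kubernetes import client, config
    #     config.load_kube_config()
    #     batch = client.BatchV1Api()
    #     status = batch.delete_namespaced_job(
    #         "example-job", "default",
    #         body=client.V1DeleteOptions(propagation_policy="Foreground"))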
    def get_api_resources(self, **kwargs):
        """
        get available resources
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: V1APIResourceList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_api_resources_with_http_info(**kwargs)
        else:
            (data) = self.get_api_resources_with_http_info(**kwargs)
            return data

    def get_api_resources_with_http_info(self, **kwargs):
        """
        get available resources
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: V1APIResourceList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/batch/v1/', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1APIResourceList',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
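    # Usage sketch for get_api_resources: a minimal sketch, not part of the
    # generated API surface. It lists the resource kinds served by batch/v1.
    #
    #     from kubernetes import client, config
    #     config.load_kube_config()
    #     resources = client.BatchV1Api().get_api_resources()
    #     print([r.name for r in resources.resources])  # e.g. ['jobs', 'jobs/status']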
    def list_job_for_all_namespaces(self, **kwargs):
        """
        list or watch objects of kind Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_job_for_all_namespaces(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :return: V1JobList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.list_job_for_all_namespaces_with_http_info(**kwargs)
        else:
            (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
            return data

    def list_job_for_all_namespaces_with_http_info(self, **kwargs):
        """
        list or watch objects of kind Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :return: V1JobList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_job_for_all_namespaces" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []
        if '_continue' in params:
            query_params.append(('continue', params['_continue']))
        if 'field_selector' in params:
            query_params.append(('fieldSelector', params['field_selector']))
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'label_selector' in params:
            query_params.append(('labelSelector', params['label_selector']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'resource_version' in params:
            query_params.append(('resourceVersion', params['resource_version']))
        if 'timeout_seconds' in params:
            query_params.append(('timeoutSeconds', params['timeout_seconds']))
        if 'watch' in params:
            query_params.append(('watch', params['watch']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1JobList',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
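    # Usage sketch for list_job_for_all_namespaces: a minimal sketch, not part
    # of the generated API surface. It pages through all Jobs cluster-wide with
    # `limit` and the opaque continue token (exposed here as `_continue`); the
    # page size of 50 is an assumed value.
    #
    #     from kubernetes import client, config
    #     config.load_kube_config()
    #     batch = client.BatchV1Api()
    #     token = None
    #     while True:
    #         page = batch.list_job_for_all_namespaces(limit=50, _continue=token)
    #         for job in page.items:
    #             print(job.metadata.namespace, job.metadata.name)
    #         token = page.metadata._continue
    #         if not token:
    #             break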
    def list_namespaced_job(self, namespace, **kwargs):
        """
        list or watch objects of kind Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_namespaced_job(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :return: V1JobList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.list_namespaced_job_with_http_info(namespace, **kwargs)
        else:
            (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
            return data

    def list_namespaced_job_with_http_info(self, namespace, **kwargs):
        """
        list or watch objects of kind Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :return: V1JobList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_namespaced_job" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")

        collection_formats = {}

        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        query_params = []
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if '_continue' in params:
            query_params.append(('continue', params['_continue']))
        if 'field_selector' in params:
            query_params.append(('fieldSelector', params['field_selector']))
        if 'label_selector' in params:
            query_params.append(('labelSelector', params['label_selector']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'resource_version' in params:
            query_params.append(('resourceVersion', params['resource_version']))
        if 'timeout_seconds' in params:
            query_params.append(('timeoutSeconds', params['timeout_seconds']))
        if 'watch' in params:
            query_params.append(('watch', params['watch']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1JobList',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
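    # Usage sketch for list_namespaced_job: a minimal sketch, not part of the
    # generated API surface. It uses the client's watch helper to stream Job
    # events from one namespace; "default" and the 30s timeout are assumptions.
    #
    #     from kubernetes import client, config, watch
    #     config.load_kube_config()
    #     batch = client.BatchV1Api()
    #     w = watch.Watch()
    #     for event in w.stream(batch.list_namespaced_job, "default", timeout_seconds=30):
    #         print(event["type"], event["object"].metadata.name)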
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
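    # Usage sketch for patch_namespaced_job_status: a minimal sketch, not part
    # of the generated API surface. The /status subresource is normally written
    # by controllers; the field and values below are purely illustrative.
    #
    #     from kubernetes import client, config
    #     config.load_kube_config()
    #     batch = client.BatchV1Api()
    #     patch = {"status": {"failed": 0}}  # hypothetical example value
    #     job = batch.patch_namespaced_job_status("example-job", "default", patch)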
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
    def read_namespaced_job_status(self, name, namespace, **kwargs):
        """
        read status of the specified Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str name: name of the Job (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1Job
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
        else:
            (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
            return data

    def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
        """
        read status of the specified Job
        This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str name: name of the Job (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1Job
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['name', 'namespace', 'pretty']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_job_status" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")

        collection_formats = {}

        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Job',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
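    # Usage sketch for read_namespaced_job_status: a minimal sketch, not part
    # of the generated API surface, polling until the Job finishes. The name,
    # namespace, and 5-second interval are assumed values.
    #
    #     import time
    #     from kubernetes import client, config
    #     config.load_kube_config()
    #     batch = client.BatchV1Api()
    #     while True:
    #         status = batch.read_namespaced_job_status("example-job", "default").status
    #         if status.succeeded or status.failed:  # both are None while running
    #             break
    #         time.sleep(5)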
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
replace_namespaced_job
replace the specified Job
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously, returns the request thread.
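A small usage sketch of the dry_run option described in this docstring, together with the PATCH counterpart for partial updates. It is illustrative only, not part of the generated module, and assumes the same hypothetical Job ("pi" in namespace "default") and a client configured from a local kubeconfig; dry_run also requires an API server that honors the dryRun query parameter.

# Sketch: server-side dry-run of a replace, then a strategic-merge patch.
# Hypothetical Job "pi" in namespace "default"; names are assumptions.
from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()

job = batch_v1.read_namespaced_job(name="pi", namespace="default")
job.spec.parallelism = 3

# Validate the change without persisting it (dryRun=All), then apply for real.
batch_v1.replace_namespaced_job(name="pi", namespace="default", body=job,
                                dry_run="All")
batch_v1.replace_namespaced_job(name="pi", namespace="default", body=job)

# Partial update: send only the fields being changed instead of the whole
# object (PATCH .../jobs/{name} with a merge-patch style body).
patch = {"metadata": {"labels": {"reviewed": "true"}}}
patched = batch_v1.patch_namespaced_job(name="pi", namespace="default", body=patch)
print(patched.metadata.labels)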
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) # MASKED: replace_namespaced_job function (lines 1215-1238) def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', 
True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
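All of the generated methods above share the calling convention their docstrings spell out: call them directly for a blocking request, or pass async_req=True and collect the result from the returned thread. A minimal usage sketch of that convention follows, assuming the generated package is installed as the official kubernetes Python client and that a local kubeconfig is available (both are assumptions, not part of this module):

# Hedged usage sketch: poll the status of an existing Job synchronously and
# asynchronously. "example-job" and "default" are invented names; the cluster,
# kubeconfig, and the `kubernetes` package are assumed, not provided here.
from kubernetes import client, config

config.load_kube_config()          # assumption: a local kubeconfig exists
api = client.BatchV1Api()

# Synchronous call: returns a V1Job directly.
job = api.read_namespaced_job_status(name="example-job", namespace="default")
print(job.status.succeeded, job.status.failed)

# Asynchronous call: returns a worker thread; .get() blocks until the V1Job arrives.
thread = api.read_namespaced_job_status(name="example-job", namespace="default",
                                        async_req=True)
job = thread.get()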
def replace_namespaced_job(self, name, namespace, body, **kwargs):
    """
    replace the specified Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Job body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Job
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
    else:
        (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
        return data
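The implementation above is the thin public wrapper the generator emits for every verb: it forces _return_http_data_only, then either hands back the worker thread (when async_req is set) or unwraps the response data from the blocking _with_http_info call. A hedged sketch of how that wrapper is typically exercised end to end, following the read-modify-replace cycle the replace_namespaced_job docstring implies; all resource names and field values are invented for illustration:

# Hedged read-modify-replace sketch for a Job. Assumes the official kubernetes
# Python client and a reachable cluster; "example-job"/"default" are placeholders.
from kubernetes import client, config

config.load_kube_config()
api = client.BatchV1Api()

job = api.read_namespaced_job(name="example-job", namespace="default")
job.spec.parallelism = 2            # parallelism is one of the mutable Job spec fields

# Optional server-side validation first: per the docstring, "All" is the only
# documented dry-run stage.
api.replace_namespaced_job(name="example-job", namespace="default",
                           body=job, dry_run="All")

updated = api.replace_namespaced_job(name="example-job", namespace="default", body=job)
print(updated.metadata.resource_version)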
1215
1238
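The two integers above are the 1-indexed start and end lines of the masked function within the full file reproduced below; the # MASKED: marker in the masked code cites the same 1215-1238 range. A small hedged sketch of how that span could be sliced back out of the file text (the record dict and its keys are placeholders, not a defined loading API):

# Hedged helper: recover the masked span from the full source text using the
# record's start/end line numbers. `record` is an illustrative placeholder.
def extract_masked_span(file_content: str, start_line: int, end_line: int) -> str:
    """Return the inclusive, 1-indexed line range [start_line, end_line]."""
    lines = file_content.splitlines()
    return "\n".join(lines[start_line - 1:end_line])

record = {"file_content": "...full source text...",  # elided here
          "start_line": 1215,
          "end_line": 1238}
snippet = extract_masked_span(record["file_content"],
                              record["start_line"],
                              record["end_line"])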
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
replace_namespaced_job_with_http_info
replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread.
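A hedged sketch of the read-modify-replace flow that replace_namespaced_job_with_http_info backs; the names are placeholders, and dry_run="All" is taken from the docstring above (it asks the server to validate the request without persisting it):

from kubernetes import client, config

config.load_kube_config()
batch = client.BatchV1Api()

job = batch.read_namespaced_job(name="example-job", namespace="default")
job.spec.parallelism = 2             # parallelism is one of the few mutable Job spec fields

# Server-side validation only; nothing is persisted while dry_run="All" is set.
batch.replace_namespaced_job(name="example-job", namespace="default", body=job, dry_run="All")

# Re-issue without dry_run to persist the replacement (HTTP PUT).
updated = batch.replace_namespaced_job(name="example-job", namespace="default", body=job)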
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data # MASKED: replace_namespaced_job_with_http_info function (lines 1240-1331) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
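The implementation above issues the PUT against /apis/batch/v1/namespaces/{namespace}/jobs/{name}; callers normally go through the replace_namespaced_job wrapper, which delegates here. A minimal read-modify-replace sketch follows, under the same assumptions as the earlier example (official kubernetes package, local kubeconfig); the Job name "pi" and the parallelism change are hypothetical.

from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()

job = batch_v1.read_namespaced_job("pi", "default")               # fetch the current V1Job
job.spec.parallelism = 2                                           # mutate a field locally
updated = batch_v1.replace_namespaced_job("pi", "default", job)    # PUT the full object back
print(updated.metadata.resource_version)

Because replace sends the whole object, the resourceVersion captured in the read step is what lets the server reject conflicting concurrent updates.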
start_line: 1240
end_line: 1331
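The full file_content of the generated module follows. Since it opens with the create_namespaced_job entry point, here is a minimal companion sketch of building and submitting a V1Job with the typed models the client ships; the image, command, names, and namespace are all illustrative, and a configured kubeconfig is assumed:

from kubernetes import client, config

config.load_kube_config()
api = client.BatchV1Api()

# A one-container Job; restart_policy must be 'Never' or 'OnFailure' for Jobs.
job = client.V1Job(
    metadata=client.V1ObjectMeta(name='example-job'),
    spec=client.V1JobSpec(
        template=client.V1PodTemplateSpec(
            spec=client.V1PodSpec(
                restart_policy='Never',
                containers=[client.V1Container(name='main', image='busybox', command=['true'])],
            )
        )
    ),
)
created = api.create_namespaced_job(namespace='default', body=job)
print(created.metadata.uid)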
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
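Example (a hedged sketch: the selector and limit are illustrative values, and a configured BatchV1Api instance named api is assumed):
>>> job_list = api.list_job_for_all_namespaces(label_selector='app=example', limit=50)
>>> [j.metadata.name for j in job_list.items]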
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
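Example (a hedged sketch via the patch_namespaced_job convenience wrapper: a plain dict body is sent as a strategic-merge patch by default, the label value is illustrative, and a configured api instance is assumed):
>>> body = {'metadata': {'labels': {'patched': 'true'}}}
>>> patched = api.patch_namespaced_job('example-job', 'default', body)
>>> patched.metadata.labels.get('patched')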
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
replace_namespaced_job_status

replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously, returns the request thread.
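Because a PUT to the /status subresource replaces the entire status object, the usual pattern is read-modify-write: resubmit the object the server returned, which carries the current resourceVersion so the server can detect conflicting writers. A sketch under the same hypothetical names as above; note that in practice the Job controller, not an end user, normally writes Job status, so this is shown with dry_run:

from kubernetes import client, config

config.load_kube_config()
batch = client.BatchV1Api()

# Read the current object, then resubmit it. Its metadata.resource_version
# lets the server reject the write if something else changed in between.
job = batch.read_namespaced_job_status(name="pi", namespace="default")

# dry_run="All" runs admission and validation server-side without persisting
# the change, per the dryRun parameter documented above.
result = batch.replace_namespaced_job_status(
    name="pi", namespace="default", body=job, dry_run="All")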
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) # MASKED: replace_namespaced_job_status function (lines 1333-1356) def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
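The `limit`/`_continue` contract spelled out in the list docstrings above implies a simple paging loop. A minimal sketch, assuming the generated client is installed as the usual `kubernetes` package and a local kubeconfig grants list access; the namespace and page size are illustrative, not part of this module:

from kubernetes import client, config

config.load_kube_config()  # assumption: a kubeconfig is available locally
batch_v1 = client.BatchV1Api()

jobs, _continue = [], None
while True:
    kwargs = {'namespace': 'default', 'limit': 50}
    if _continue:
        kwargs['_continue'] = _continue  # token issued by the previous page
    page = batch_v1.list_namespaced_job(**kwargs)
    jobs.extend(page.items)
    _continue = page.metadata._continue  # empty once no more results remain
    if not _continue:
        break
print('collected %d jobs' % len(jobs))

If the server stops honoring the token (the 410 ResourceExpired case described in the docstrings), the loop has to be restarted without `_continue` to regain a consistent list.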
def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data
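As a usage illustration of the implementation above: read the current object, mutate its status, and PUT it back, either synchronously or via the `async_req=True` pattern every docstring in this file describes. A sketch only; the job name and namespace are assumptions, and in a live cluster the Job status subresource is normally written by the controller, so this call needs matching RBAC:

from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()

# Read-modify-write: the resourceVersion carried in job.metadata lets the
# API server reject the PUT if the object changed in between.
job = batch_v1.read_namespaced_job_status('my-job', 'default')  # hypothetical Job
job.status.succeeded = job.status.succeeded or 0
updated = batch_v1.replace_namespaced_job_status('my-job', 'default', job)

# The same call issued asynchronously; thread.get() blocks for the result.
thread = batch_v1.replace_namespaced_job_status('my-job', 'default', job,
                                                async_req=True)
updated = thread.get()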
1333
1356
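For the watch parameters documented throughout this file (watch, resource_version, timeout_seconds), the companion kubernetes.watch helper turns the streaming response into discrete events. A sketch, assuming the full `kubernetes` package is installed alongside this generated module:

from kubernetes import client, config, watch

config.load_kube_config()
batch_v1 = client.BatchV1Api()

# List first, then watch from that list's resourceVersion so no
# modification between the two calls is missed.
initial = batch_v1.list_namespaced_job(namespace='default')
w = watch.Watch()
for event in w.stream(batch_v1.list_namespaced_job, namespace='default',
                      resource_version=initial.metadata.resource_version,
                      timeout_seconds=60):
    print(event['type'], event['object'].metadata.name)  # e.g. ADDED my-job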
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
replace_namespaced_job_status_with_http_info
replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread.
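As a sketch of the read-modify-write cycle this status subresource implies — the Job name is hypothetical and the client/package assumptions are the same as in the earlier sketches — one might validate a status replacement with dry_run before persisting it:

    from kubernetes import client, config

    config.load_kube_config()
    batch_v1 = client.BatchV1Api()

    # Read the current status, adjust it locally, then PUT it back.
    job = batch_v1.read_namespaced_job_status("example-job", "default")
    job.status.succeeded = job.status.succeeded or 0
    # dry_run="All" asks the server to process but not persist the change.
    checked = batch_v1.replace_namespaced_job_status("example-job", "default", job, dry_run="All")
    print(checked.status)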
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
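Usage sketch (hedged example; the label selector and limit are placeholder
values and assume an already-configured `BatchV1Api` instance):

>>> jobs = api.list_job_for_all_namespaces(label_selector='app=demo', limit=50)
>>> for job in jobs.items:
...     print(job.metadata.namespace, job.metadata.name)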
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
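Usage sketch (illustrative; `demo-job` is a placeholder name and the body is
a hypothetical dict-style patch mutating the Job's parallelism):

>>> patch = {'spec': {'parallelism': 2}}
>>> job = api.patch_namespaced_job('demo-job', 'default', patch)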
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
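Usage sketch (name and namespace are placeholders; assumes the Job exists):

>>> job = api.read_namespaced_job('demo-job', 'default')
>>> print(job.status.succeeded)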
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
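Read-modify-write sketch (hedged; assumes a Job named `demo-job` already
exists and that parallelism is the field being changed):

>>> job = api.read_namespaced_job('demo-job', 'default')
>>> job.spec.parallelism = 3
>>> job = api.replace_namespaced_job('demo-job', 'default', job)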
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data # MASKED: replace_namespaced_job_status_with_http_info function (lines 1358-1449)
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
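The recorded implementation above is the generated `_with_http_info` wrapper for the status subresource. As orientation, here is a hedged usage sketch — not part of the dataset row — showing how the two status methods round-trip a Job through the packaged kubernetes client; the Job name "demo-job" and namespace "default" are illustrative assumptions.

from kubernetes import client, config

config.load_kube_config()  # assumes a reachable cluster via a local kubeconfig
batch = client.BatchV1Api()

# GET /apis/batch/v1/namespaces/{namespace}/jobs/{name}/status
job = batch.read_namespaced_job_status(name="demo-job", namespace="default")

# PUT the object back to the status subresource (the wrapper implemented above).
updated = batch.replace_namespaced_job_status(
    name="demo-job", namespace="default", body=job, pretty="true")
print(updated.status.succeeded, updated.status.failed)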
1358
1449
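Every generated method in the file content reproduced below also accepts async_req=True, in which case it returns a thread-like handle whose get() blocks for the response, exactly as the docstrings describe. A minimal sketch of that pattern, assuming an illustrative namespace and label selector:

from kubernetes import client, config

config.load_kube_config()
batch = client.BatchV1Api()

# Kick off the request without blocking; the handle behaves like
# multiprocessing.pool.ApplyResult.
thread = batch.list_namespaced_job(
    "default", label_selector="app=demo", async_req=True)

job_list = thread.get(timeout=30)  # V1JobList once the HTTP call completes
for job in job_list.items:
    print(job.metadata.name, job.status.succeeded)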
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.13.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class BatchV1Api(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_job(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_job_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs) return data def create_namespaced_job_with_http_info(self, namespace, body, **kwargs): """ create a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_job(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) return data def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): """ delete collection of Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_job(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs) return data def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ delete a Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) if 'grace_period_seconds' in params: query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) if 'orphan_dependents' in params: query_params.append(('orphanDependents', params['orphan_dependents'])) if 'propagation_policy' in params: query_params.append(('propagationPolicy', params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_api_resources_with_http_info(**kwargs) else: (data) = self.get_api_resources_with_http_info(**kwargs) return data def get_api_resources_with_http_info(self, **kwargs): """ get available resources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_job_for_all_namespaces(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_job_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_job_for_all_namespaces_with_http_info(**kwargs) return data def list_job_for_all_namespaces_with_http_info(self, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_job_for_all_namespaces" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_job(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_job_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) return data def list_namespaced_job_with_http_info(self, namespace, **kwargs): """ list or watch objects of kind Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1JobList If the method is called asynchronously, returns the request thread. """ all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") collection_formats = {} path_params = {} if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'include_uninitialized' in params: query_params.append(('includeUninitialized', params['include_uninitialized'])) if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if '_continue' in params: query_params.append(('continue', params['_continue'])) if 'field_selector' in params: query_params.append(('fieldSelector', params['field_selector'])) if 'label_selector' in params: query_params.append(('labelSelector', params['label_selector'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'resource_version' in params: query_params.append(('resourceVersion', params['resource_version'])) if 'timeout_seconds' in params: query_params.append(('timeoutSeconds', params['timeout_seconds'])) if 'watch' in params: query_params.append(('watch', params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ 
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1JobList', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ partially update the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_namespaced_job_status(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ partially update status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_with_http_info(self, name, namespace, **kwargs): """ read the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'pretty', 'exact', 'export'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'exact' in params: query_params.append(('exact', params['exact'])) if 'export' in params: query_params.append(('export', params['export'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def read_namespaced_job_status(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) return data def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): """ read status of the specified Job This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'pretty'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method read_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def replace_namespaced_job(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs): """ replace the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def replace_namespaced_job_status(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) return data def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): """ replace status of the specified Job This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Job (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Job body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Job If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_job_status" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`") # verify the required parameter 'namespace' is set if ('namespace' not in params) or (params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`") collection_formats = {} path_params = {} if 'name' in params: path_params['name'] = params['name'] if 'namespace' in params: path_params['namespace'] = params['namespace'] query_params = [] if 'pretty' in params: query_params.append(('pretty', params['pretty'])) if 'dry_run' in params: query_params.append(('dryRun', params['dry_run'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['*/*']) # Authentication setting auth_settings = ['BearerToken'] return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Job', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
_evaluate_steps
One step of evaluation across replicas.

Args:
    per_replica_features: the batched features.
    per_replica_labels: the batched labels.

Returns:
    The loss corresponding to the given batch.
"""Tensorflow trainer class.""" import logging import math import os from typing import Callable, Dict, Optional import numpy as np import tensorflow as tf from .modeling_tf_utils import TFPreTrainedModel, shape_list from .optimization_tf import GradientAccumulator, create_optimizer from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput from .training_args_tf import TFTrainingArguments logger = logging.getLogger(__name__) class TFTrainer: model: TFPreTrainedModel args: TFTrainingArguments # something similar to a PT Dataset. # This is just temporary before to have # a framework-agnostic approach for datasets. train_dataset: Optional[tf.data.Dataset] eval_dataset: Optional[tf.data.Dataset] compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None prediction_loss_only: bool def __init__( self, model: TFPreTrainedModel, args: TFTrainingArguments, train_dataset: Optional[tf.data.Dataset] = None, eval_dataset: Optional[tf.data.Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, prediction_loss_only=False, ): self.model = model self.args = args self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.compute_metrics = compute_metrics self.prediction_loss_only = prediction_loss_only self.gradient_accumulator = GradientAccumulator() self._setup_training() def _setup_training(self) -> None: """ Setup the different steps to train a model: - check if all the data are given - create the proper strategy - create the features - prepare the model settings """ self._prepare_dataset() with self.args.strategy.scope(): self._create_optimizer() _ = self.optimizer.iterations self._set_loss_and_metric() self._create_checkpoint_manager() self._create_summary_writer() def _set_loss_and_metric(self) -> None: """ Create the training loss and metric with their name. Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ try: self.loss = tf.keras.losses.get( { "class_name": self.args.loss_name, "config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE}, } ) except TypeError: self.loss = tf.keras.losses.get( {"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}} ) def _create_summary_writer(self) -> None: """ Create a summary writer to be able to read the logs in Tensorboard. """ self.writer = tf.summary.create_file_writer(self.args.logging_dir) def _prepare_dataset(self) -> None: """ Prepare the training, validation and test data. """ if self.train_dataset is not None: self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy() if self.args.max_steps > 0: self.train_steps = self.args.max_steps else: self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size) self.train_dataset = ( self.train_dataset.cache() .shuffle(self.num_train_examples) .batch(self.args.train_batch_size) .prefetch(tf.data.experimental.AUTOTUNE) ) if self.args.max_steps > 0: self.train_dataset = self.train_dataset.repeat(-1) self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset) else: self.train_steps = 0 if self.eval_dataset is not None: self.eval_dataset = ( self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE) ) self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset) def _create_optimizer(self) -> None: """ Create the training optimizer with its name. 
Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ if self.args.optimizer_name == "adamw": self.optimizer = create_optimizer( self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr ) else: try: self.optimizer = tf.keras.optimizers.get( { "class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon}, } ) except TypeError: # This is for the case where the optimizer is not Adam-like such as SGD self.optimizer = tf.keras.optimizers.get( {"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}} ) logger.info("Created an/a {} optimizer".format(self.args.optimizer_name)) def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None: """ Create a checkpoint manager in order to be able to make the training fault-tolerant. Args: max_to_keep: the maximum number of checkpoints to keep in the checkpoint path. load_model: if we want to start the training from the latest checkpoint. """ ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model) self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep) if load_model: ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial() # MASKED: _evaluate_steps function (lines 161-180) def _prediction_loop( self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None ) -> PredictionOutput: logger.info("***** Running %s *****", description) logger.info(" Batch size = %d", self.args.eval_batch_size) label_ids: np.ndarray = None preds: np.ndarray = None step: int = 1 for features, labels in dataset: step = tf.convert_to_tensor(step, dtype=tf.int64) loss, logits = self._evaluate_steps(features, labels) loss = tf.reduce_mean(loss) if not prediction_loss_only: if self.args.n_gpu > 1: for val in logits.values: if preds is None: preds = val.numpy() else: preds = np.append(preds, val.numpy(), axis=0) for val in labels.values: if label_ids is None: label_ids = val.numpy() else: label_ids = np.append(label_ids, val.numpy(), axis=0) else: if preds is None: preds = logits.numpy() else: preds = np.append(preds, logits.numpy(), axis=0) if label_ids is None: label_ids = labels.numpy() else: label_ids = np.append(label_ids, labels.numpy(), axis=0) step += 1 if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} metrics["eval_loss"] = loss.numpy() for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def evaluate( self, eval_dataset: Optional[tf.data.Dataset] = None, prediction_loss_only: Optional[bool] = None ) -> Dict[str, float]: """ Prediction/evaluation loop, shared by `evaluate()` and `predict()`. """ if eval_dataset is None: eval_dataset = self.eval_dataset output = self._prediction_loop(eval_dataset, description="Evaluation") return output.metrics def train(self) -> None: """ Train method to train the model. 
""" if self.args.debug: tf.summary.trace_on(graph=True, profiler=True) self.gradient_accumulator.reset() iterations = self.optimizer.iterations if iterations.numpy() > 0: logger.info("Start the training from the last checkpoint") start_epoch = (iterations.numpy() // self.train_steps) + 1 else: start_epoch = 1 tf.summary.experimental.set_step(iterations) epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs logger.info("***** Running training *****") logger.info(" Num examples = %d", self.num_train_examples) logger.info(" Num Epochs = %d", epochs) logger.info(" Total optimization steps = %d", self.train_steps) for epoch in range(start_epoch, int(epochs + 1)): for training_loss in self._training_steps(): step = iterations.numpy() if self.args.debug: with self.writer.as_default(): tf.summary.scalar("loss", training_loss, step=step) if step == 1 and self.args.debug: with self.writer.as_default(): tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir) if self.args.evaluate_during_training and step % self.args.eval_steps == 0: logs = {} results = self.evaluate() for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value if callable(self.optimizer.learning_rate): logs["learning_rate"] = self.optimizer.learning_rate(step).numpy() else: logs["learning_rate"] = self.optimizer.learning_rate.numpy() logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs)) with self.writer.as_default(): for k, v in logs.items(): tf.summary.scalar(k, v, step=step) if step % self.args.logging_steps == 0: logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy())) if step % self.args.save_steps == 0: ckpt_save_path = self.model.ckpt_manager.save() logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path)) if step % self.train_steps == 0: break def _training_steps(self): """ Returns a generator over training steps (i.e. parameters update). 
""" for i, loss in enumerate(self._accumulate_next_gradients()): if i % self.args.gradient_accumulation_steps == 0: self._apply_gradients() yield loss @tf.function def _apply_gradients(self): """Applies the gradients (cross-replica).""" self.args.strategy.experimental_run_v2(self._step) def _step(self): """Applies gradients and resets accumulation.""" gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync gradients = [ gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients ] gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients] self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables))) self.gradient_accumulator.reset() def _accumulate_next_gradients(self): """Accumulates the gradients from the next element in dataset.""" iterator = iter(self.train_dataset) @tf.function def _accumulate_next(): per_replica_features, per_replica_labels = next(iterator) return self._accumulate_gradients(per_replica_features, per_replica_labels) while True: try: yield _accumulate_next() except tf.errors.OutOfRangeError: break def _accumulate_gradients(self, per_replica_features, per_replica_labels): """Accumulates the gradients across all the replica.""" per_replica_loss = self.args.strategy.experimental_run_v2( self._forward, args=(per_replica_features, per_replica_labels) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss def _forward(self, features, labels): """Forwards a training example and accumulates the gradients.""" per_example_loss, _ = self._run_model(features, labels, True) gradients = tf.gradients(per_example_loss, self.model.trainable_variables) gradients = [ g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables) ] self.gradient_accumulator(gradients) return per_example_loss def _run_model(self, features, labels, training): """ Computes the loss of the given features and labels pair. Args: features: the batched features. labels: the batched labels. training: run the model in training mode or not """ if self.args.mode == "text-classification" or self.args.mode == "token-classification": logits = self.model(features, training=training)[0] else: logits = self.model(features, training=training) if self.args.mode == "token-classification": active_loss = tf.reshape(labels, (-1,)) != -1 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) loss = self.loss(labels, reduced_logits) elif self.args.mode == "question-answering": start_loss = self.loss(labels["start_position"], logits[0]) end_loss = self.loss(labels["end_position"], logits[1]) loss = (start_loss + end_loss) / 2.0 else: loss = self.loss(labels, logits) loss += sum(self.model.losses) * (1.0 / self.args.n_gpu) return loss, logits def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput: """ Run prediction and return predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in evaluate(). Args: test_dataset: something similar to a PT Dataset. This is just temporary before to have a framework-agnostic approach for datasets. 
""" test_dataset = test_dataset.batch(self.args.eval_batch_size) test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset) return self._prediction_loop(test_dataset, description="Prediction") def save_model(self) -> None: """ Save the pretrained model and create a Tensorflow saved model. """ logger.info("Saving model in {}".format(self.args.output_dir)) path = os.path.join(self.args.output_dir, "saved_model") logger.info("Saving model in {}".format(path)) os.makedirs(path, exist_ok=True) self.model.save_pretrained(self.args.output_dir)
@tf.function
def _evaluate_steps(self, per_replica_features, per_replica_labels):
    """
    One step of evaluation across replicas.

    Args:
        per_replica_features: the batched features.
        per_replica_labels: the batched labels.

    Returns:
        The loss corresponding to the given batch.
    """
    per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2(
        self._run_model, args=(per_replica_features, per_replica_labels, False)
    )

    try:
        reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
    except ValueError:
        reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)

    return reduced_loss, per_replica_logits
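The implementation runs the forward pass on every replica and mean-reduces the per-replica losses, trying axis=0 first and falling back to a plain cross-replica reduce when the value has no batch axis. A minimal sketch of the same pattern with the current tf.distribute API (strategy.run superseded the experimental_run_v2 used here as of TF 2.2; the squared-value "loss" is a toy stand-in):

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

@tf.function
def distributed_eval_step(batch):
    # Each replica computes its own per-example losses...
    per_replica_loss = strategy.run(lambda x: tf.square(x), args=(batch,))
    # ...which are averaged across replicas and across the batch
    # dimension, mirroring the axis=0 branch above.
    return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)

loss = distributed_eval_step(tf.constant([1.0, 2.0, 3.0, 4.0]))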
161
180
"""Tensorflow trainer class.""" import logging import math import os from typing import Callable, Dict, Optional import numpy as np import tensorflow as tf from .modeling_tf_utils import TFPreTrainedModel, shape_list from .optimization_tf import GradientAccumulator, create_optimizer from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput from .training_args_tf import TFTrainingArguments logger = logging.getLogger(__name__) class TFTrainer: model: TFPreTrainedModel args: TFTrainingArguments # something similar to a PT Dataset. # This is just temporary before to have # a framework-agnostic approach for datasets. train_dataset: Optional[tf.data.Dataset] eval_dataset: Optional[tf.data.Dataset] compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None prediction_loss_only: bool def __init__( self, model: TFPreTrainedModel, args: TFTrainingArguments, train_dataset: Optional[tf.data.Dataset] = None, eval_dataset: Optional[tf.data.Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, prediction_loss_only=False, ): self.model = model self.args = args self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.compute_metrics = compute_metrics self.prediction_loss_only = prediction_loss_only self.gradient_accumulator = GradientAccumulator() self._setup_training() def _setup_training(self) -> None: """ Setup the different steps to train a model: - check if all the data are given - create the proper strategy - create the features - prepare the model settings """ self._prepare_dataset() with self.args.strategy.scope(): self._create_optimizer() _ = self.optimizer.iterations self._set_loss_and_metric() self._create_checkpoint_manager() self._create_summary_writer() def _set_loss_and_metric(self) -> None: """ Create the training loss and metric with their name. Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ try: self.loss = tf.keras.losses.get( { "class_name": self.args.loss_name, "config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE}, } ) except TypeError: self.loss = tf.keras.losses.get( {"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}} ) def _create_summary_writer(self) -> None: """ Create a summary writer to be able to read the logs in Tensorboard. """ self.writer = tf.summary.create_file_writer(self.args.logging_dir) def _prepare_dataset(self) -> None: """ Prepare the training, validation and test data. """ if self.train_dataset is not None: self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy() if self.args.max_steps > 0: self.train_steps = self.args.max_steps else: self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size) self.train_dataset = ( self.train_dataset.cache() .shuffle(self.num_train_examples) .batch(self.args.train_batch_size) .prefetch(tf.data.experimental.AUTOTUNE) ) if self.args.max_steps > 0: self.train_dataset = self.train_dataset.repeat(-1) self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset) else: self.train_steps = 0 if self.eval_dataset is not None: self.eval_dataset = ( self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE) ) self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset) def _create_optimizer(self) -> None: """ Create the training optimizer with its name. 
Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ if self.args.optimizer_name == "adamw": self.optimizer = create_optimizer( self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr ) else: try: self.optimizer = tf.keras.optimizers.get( { "class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon}, } ) except TypeError: # This is for the case where the optimizer is not Adam-like such as SGD self.optimizer = tf.keras.optimizers.get( {"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}} ) logger.info("Created an/a {} optimizer".format(self.args.optimizer_name)) def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None: """ Create a checkpoint manager in order to be able to make the training fault-tolerant. Args: max_to_keep: the maximum number of checkpoints to keep in the checkpoint path. load_model: if we want to start the training from the latest checkpoint. """ ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model) self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep) if load_model: ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial() @tf.function def _evaluate_steps(self, per_replica_features, per_replica_labels): """ One step evaluation across replica. Args: per_replica_features: the batched features. per_replica_labels: the batched labels. Returns: The loss corresponding to the given batch. """ per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2( self._run_model, args=(per_replica_features, per_replica_labels, False) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss, per_replica_logits def _prediction_loop( self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None ) -> PredictionOutput: logger.info("***** Running %s *****", description) logger.info(" Batch size = %d", self.args.eval_batch_size) label_ids: np.ndarray = None preds: np.ndarray = None step: int = 1 for features, labels in dataset: step = tf.convert_to_tensor(step, dtype=tf.int64) loss, logits = self._evaluate_steps(features, labels) loss = tf.reduce_mean(loss) if not prediction_loss_only: if self.args.n_gpu > 1: for val in logits.values: if preds is None: preds = val.numpy() else: preds = np.append(preds, val.numpy(), axis=0) for val in labels.values: if label_ids is None: label_ids = val.numpy() else: label_ids = np.append(label_ids, val.numpy(), axis=0) else: if preds is None: preds = logits.numpy() else: preds = np.append(preds, logits.numpy(), axis=0) if label_ids is None: label_ids = labels.numpy() else: label_ids = np.append(label_ids, labels.numpy(), axis=0) step += 1 if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} metrics["eval_loss"] = loss.numpy() for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def evaluate( self, eval_dataset: Optional[tf.data.Dataset] = None, 
prediction_loss_only: Optional[bool] = None ) -> Dict[str, float]: """ Prediction/evaluation loop, shared by `evaluate()` and `predict()`. """ if eval_dataset is None: eval_dataset = self.eval_dataset output = self._prediction_loop(eval_dataset, description="Evaluation") return output.metrics def train(self) -> None: """ Train method to train the model. """ if self.args.debug: tf.summary.trace_on(graph=True, profiler=True) self.gradient_accumulator.reset() iterations = self.optimizer.iterations if iterations.numpy() > 0: logger.info("Start the training from the last checkpoint") start_epoch = (iterations.numpy() // self.train_steps) + 1 else: start_epoch = 1 tf.summary.experimental.set_step(iterations) epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs logger.info("***** Running training *****") logger.info(" Num examples = %d", self.num_train_examples) logger.info(" Num Epochs = %d", epochs) logger.info(" Total optimization steps = %d", self.train_steps) for epoch in range(start_epoch, int(epochs + 1)): for training_loss in self._training_steps(): step = iterations.numpy() if self.args.debug: with self.writer.as_default(): tf.summary.scalar("loss", training_loss, step=step) if step == 1 and self.args.debug: with self.writer.as_default(): tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir) if self.args.evaluate_during_training and step % self.args.eval_steps == 0: logs = {} results = self.evaluate() for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value if callable(self.optimizer.learning_rate): logs["learning_rate"] = self.optimizer.learning_rate(step).numpy() else: logs["learning_rate"] = self.optimizer.learning_rate.numpy() logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs)) with self.writer.as_default(): for k, v in logs.items(): tf.summary.scalar(k, v, step=step) if step % self.args.logging_steps == 0: logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy())) if step % self.args.save_steps == 0: ckpt_save_path = self.model.ckpt_manager.save() logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path)) if step % self.train_steps == 0: break def _training_steps(self): """ Returns a generator over training steps (i.e. parameters update). 
""" for i, loss in enumerate(self._accumulate_next_gradients()): if i % self.args.gradient_accumulation_steps == 0: self._apply_gradients() yield loss @tf.function def _apply_gradients(self): """Applies the gradients (cross-replica).""" self.args.strategy.experimental_run_v2(self._step) def _step(self): """Applies gradients and resets accumulation.""" gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync gradients = [ gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients ] gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients] self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables))) self.gradient_accumulator.reset() def _accumulate_next_gradients(self): """Accumulates the gradients from the next element in dataset.""" iterator = iter(self.train_dataset) @tf.function def _accumulate_next(): per_replica_features, per_replica_labels = next(iterator) return self._accumulate_gradients(per_replica_features, per_replica_labels) while True: try: yield _accumulate_next() except tf.errors.OutOfRangeError: break def _accumulate_gradients(self, per_replica_features, per_replica_labels): """Accumulates the gradients across all the replica.""" per_replica_loss = self.args.strategy.experimental_run_v2( self._forward, args=(per_replica_features, per_replica_labels) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss def _forward(self, features, labels): """Forwards a training example and accumulates the gradients.""" per_example_loss, _ = self._run_model(features, labels, True) gradients = tf.gradients(per_example_loss, self.model.trainable_variables) gradients = [ g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables) ] self.gradient_accumulator(gradients) return per_example_loss def _run_model(self, features, labels, training): """ Computes the loss of the given features and labels pair. Args: features: the batched features. labels: the batched labels. training: run the model in training mode or not """ if self.args.mode == "text-classification" or self.args.mode == "token-classification": logits = self.model(features, training=training)[0] else: logits = self.model(features, training=training) if self.args.mode == "token-classification": active_loss = tf.reshape(labels, (-1,)) != -1 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) loss = self.loss(labels, reduced_logits) elif self.args.mode == "question-answering": start_loss = self.loss(labels["start_position"], logits[0]) end_loss = self.loss(labels["end_position"], logits[1]) loss = (start_loss + end_loss) / 2.0 else: loss = self.loss(labels, logits) loss += sum(self.model.losses) * (1.0 / self.args.n_gpu) return loss, logits def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput: """ Run prediction and return predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in evaluate(). Args: test_dataset: something similar to a PT Dataset. This is just temporary before to have a framework-agnostic approach for datasets. 
""" test_dataset = test_dataset.batch(self.args.eval_batch_size) test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset) return self._prediction_loop(test_dataset, description="Prediction") def save_model(self) -> None: """ Save the pretrained model and create a Tensorflow saved model. """ logger.info("Saving model in {}".format(self.args.output_dir)) path = os.path.join(self.args.output_dir, "saved_model") logger.info("Saving model in {}".format(path)) os.makedirs(path, exist_ok=True) self.model.save_pretrained(self.args.output_dir)
_run_model
Computes the loss for the given features and labels pair.

Args:
    features: the batched features.
    labels: the batched labels.
    training: whether to run the model in training mode.
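The loss _run_model computes is mode-dependent; for token classification (visible in the full file_content for this row), positions labeled -1 are excluded before the loss is computed. A generic sketch of that masking with tf.boolean_mask (shapes and label values are illustrative; this is not the masked implementation itself):

import tensorflow as tf

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)

logits = tf.random.normal((2, 5, 3))      # (batch, seq_len, num_labels)
labels = tf.constant([[0, 1, -1, 2, -1],
                      [2, -1, 0, 1, 1]])  # -1 marks ignored positions

active = tf.reshape(labels, (-1,)) != -1
flat_logits = tf.boolean_mask(tf.reshape(logits, (-1, 3)), active)
flat_labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active)
per_token_loss = loss_fn(flat_labels, flat_logits)  # one loss per kept token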
"""Tensorflow trainer class.""" import logging import math import os from typing import Callable, Dict, Optional import numpy as np import tensorflow as tf from .modeling_tf_utils import TFPreTrainedModel, shape_list from .optimization_tf import GradientAccumulator, create_optimizer from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput from .training_args_tf import TFTrainingArguments logger = logging.getLogger(__name__) class TFTrainer: model: TFPreTrainedModel args: TFTrainingArguments # something similar to a PT Dataset. # This is just temporary before to have # a framework-agnostic approach for datasets. train_dataset: Optional[tf.data.Dataset] eval_dataset: Optional[tf.data.Dataset] compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None prediction_loss_only: bool def __init__( self, model: TFPreTrainedModel, args: TFTrainingArguments, train_dataset: Optional[tf.data.Dataset] = None, eval_dataset: Optional[tf.data.Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, prediction_loss_only=False, ): self.model = model self.args = args self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.compute_metrics = compute_metrics self.prediction_loss_only = prediction_loss_only self.gradient_accumulator = GradientAccumulator() self._setup_training() def _setup_training(self) -> None: """ Setup the different steps to train a model: - check if all the data are given - create the proper strategy - create the features - prepare the model settings """ self._prepare_dataset() with self.args.strategy.scope(): self._create_optimizer() _ = self.optimizer.iterations self._set_loss_and_metric() self._create_checkpoint_manager() self._create_summary_writer() def _set_loss_and_metric(self) -> None: """ Create the training loss and metric with their name. Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ try: self.loss = tf.keras.losses.get( { "class_name": self.args.loss_name, "config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE}, } ) except TypeError: self.loss = tf.keras.losses.get( {"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}} ) def _create_summary_writer(self) -> None: """ Create a summary writer to be able to read the logs in Tensorboard. """ self.writer = tf.summary.create_file_writer(self.args.logging_dir) def _prepare_dataset(self) -> None: """ Prepare the training, validation and test data. """ if self.train_dataset is not None: self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy() if self.args.max_steps > 0: self.train_steps = self.args.max_steps else: self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size) self.train_dataset = ( self.train_dataset.cache() .shuffle(self.num_train_examples) .batch(self.args.train_batch_size) .prefetch(tf.data.experimental.AUTOTUNE) ) if self.args.max_steps > 0: self.train_dataset = self.train_dataset.repeat(-1) self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset) else: self.train_steps = 0 if self.eval_dataset is not None: self.eval_dataset = ( self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE) ) self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset) def _create_optimizer(self) -> None: """ Create the training optimizer with its name. 
Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ if self.args.optimizer_name == "adamw": self.optimizer = create_optimizer( self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr ) else: try: self.optimizer = tf.keras.optimizers.get( { "class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon}, } ) except TypeError: # This is for the case where the optimizer is not Adam-like such as SGD self.optimizer = tf.keras.optimizers.get( {"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}} ) logger.info("Created an/a {} optimizer".format(self.args.optimizer_name)) def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None: """ Create a checkpoint manager in order to be able to make the training fault-tolerant. Args: max_to_keep: the maximum number of checkpoints to keep in the checkpoint path. load_model: if we want to start the training from the latest checkpoint. """ ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model) self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep) if load_model: ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial() @tf.function def _evaluate_steps(self, per_replica_features, per_replica_labels): """ One step evaluation across replica. Args: per_replica_features: the batched features. per_replica_labels: the batched labels. Returns: The loss corresponding to the given batch. """ per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2( self._run_model, args=(per_replica_features, per_replica_labels, False) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss, per_replica_logits def _prediction_loop( self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None ) -> PredictionOutput: logger.info("***** Running %s *****", description) logger.info(" Batch size = %d", self.args.eval_batch_size) label_ids: np.ndarray = None preds: np.ndarray = None step: int = 1 for features, labels in dataset: step = tf.convert_to_tensor(step, dtype=tf.int64) loss, logits = self._evaluate_steps(features, labels) loss = tf.reduce_mean(loss) if not prediction_loss_only: if self.args.n_gpu > 1: for val in logits.values: if preds is None: preds = val.numpy() else: preds = np.append(preds, val.numpy(), axis=0) for val in labels.values: if label_ids is None: label_ids = val.numpy() else: label_ids = np.append(label_ids, val.numpy(), axis=0) else: if preds is None: preds = logits.numpy() else: preds = np.append(preds, logits.numpy(), axis=0) if label_ids is None: label_ids = labels.numpy() else: label_ids = np.append(label_ids, labels.numpy(), axis=0) step += 1 if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} metrics["eval_loss"] = loss.numpy() for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def evaluate( self, eval_dataset: Optional[tf.data.Dataset] = None, 
prediction_loss_only: Optional[bool] = None ) -> Dict[str, float]: """ Prediction/evaluation loop, shared by `evaluate()` and `predict()`. """ if eval_dataset is None: eval_dataset = self.eval_dataset output = self._prediction_loop(eval_dataset, description="Evaluation") return output.metrics def train(self) -> None: """ Train method to train the model. """ if self.args.debug: tf.summary.trace_on(graph=True, profiler=True) self.gradient_accumulator.reset() iterations = self.optimizer.iterations if iterations.numpy() > 0: logger.info("Start the training from the last checkpoint") start_epoch = (iterations.numpy() // self.train_steps) + 1 else: start_epoch = 1 tf.summary.experimental.set_step(iterations) epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs logger.info("***** Running training *****") logger.info(" Num examples = %d", self.num_train_examples) logger.info(" Num Epochs = %d", epochs) logger.info(" Total optimization steps = %d", self.train_steps) for epoch in range(start_epoch, int(epochs + 1)): for training_loss in self._training_steps(): step = iterations.numpy() if self.args.debug: with self.writer.as_default(): tf.summary.scalar("loss", training_loss, step=step) if step == 1 and self.args.debug: with self.writer.as_default(): tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir) if self.args.evaluate_during_training and step % self.args.eval_steps == 0: logs = {} results = self.evaluate() for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value if callable(self.optimizer.learning_rate): logs["learning_rate"] = self.optimizer.learning_rate(step).numpy() else: logs["learning_rate"] = self.optimizer.learning_rate.numpy() logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs)) with self.writer.as_default(): for k, v in logs.items(): tf.summary.scalar(k, v, step=step) if step % self.args.logging_steps == 0: logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy())) if step % self.args.save_steps == 0: ckpt_save_path = self.model.ckpt_manager.save() logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path)) if step % self.train_steps == 0: break def _training_steps(self): """ Returns a generator over training steps (i.e. parameters update). 
""" for i, loss in enumerate(self._accumulate_next_gradients()): if i % self.args.gradient_accumulation_steps == 0: self._apply_gradients() yield loss @tf.function def _apply_gradients(self): """Applies the gradients (cross-replica).""" self.args.strategy.experimental_run_v2(self._step) def _step(self): """Applies gradients and resets accumulation.""" gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync gradients = [ gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients ] gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients] self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables))) self.gradient_accumulator.reset() def _accumulate_next_gradients(self): """Accumulates the gradients from the next element in dataset.""" iterator = iter(self.train_dataset) @tf.function def _accumulate_next(): per_replica_features, per_replica_labels = next(iterator) return self._accumulate_gradients(per_replica_features, per_replica_labels) while True: try: yield _accumulate_next() except tf.errors.OutOfRangeError: break def _accumulate_gradients(self, per_replica_features, per_replica_labels): """Accumulates the gradients across all the replica.""" per_replica_loss = self.args.strategy.experimental_run_v2( self._forward, args=(per_replica_features, per_replica_labels) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss def _forward(self, features, labels): """Forwards a training example and accumulates the gradients.""" per_example_loss, _ = self._run_model(features, labels, True) gradients = tf.gradients(per_example_loss, self.model.trainable_variables) gradients = [ g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables) ] self.gradient_accumulator(gradients) return per_example_loss # MASKED: _run_model function (lines 383-410) def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput: """ Run prediction and return predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in evaluate(). Args: test_dataset: something similar to a PT Dataset. This is just temporary before to have a framework-agnostic approach for datasets. """ test_dataset = test_dataset.batch(self.args.eval_batch_size) test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset) return self._prediction_loop(test_dataset, description="Prediction") def save_model(self) -> None: """ Save the pretrained model and create a Tensorflow saved model. """ logger.info("Saving model in {}".format(self.args.output_dir)) path = os.path.join(self.args.output_dir, "saved_model") logger.info("Saving model in {}".format(path)) os.makedirs(path, exist_ok=True) self.model.save_pretrained(self.args.output_dir)
    def _run_model(self, features, labels, training):
        """
        Computes the loss of the given features and labels pair.
        Args:
          features: the batched features.
          labels: the batched labels.
          training: run the model in training mode or not
        """
        if self.args.mode == "text-classification" or self.args.mode == "token-classification":
            logits = self.model(features, training=training)[0]
        else:
            logits = self.model(features, training=training)

        if self.args.mode == "token-classification":
            active_loss = tf.reshape(labels, (-1,)) != -1
            reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
            labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
            loss = self.loss(labels, reduced_logits)
        elif self.args.mode == "question-answering":
            start_loss = self.loss(labels["start_position"], logits[0])
            end_loss = self.loss(labels["end_position"], logits[1])
            loss = (start_loss + end_loss) / 2.0
        else:
            loss = self.loss(labels, logits)

        loss += sum(self.model.losses) * (1.0 / self.args.n_gpu)

        return loss, logits
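The token-classification branch above flattens the batch and uses a boolean mask to drop padded positions (labelled -1) before computing the loss. Below is a minimal standalone sketch of that masking step with toy tensors; the shapes, the label values, and the choice of SparseCategoricalCrossentropy are illustrative assumptions, not part of the trainer code above.

import tensorflow as tf

# Toy batch: 1 sequence, 4 tokens, 3 classes; -1 marks padded positions to ignore.
logits = tf.random.uniform((1, 4, 3))
labels = tf.constant([[2, 0, -1, -1]])

active = tf.reshape(labels, (-1,)) != -1                  # mask of real (non-padded) tokens
flat_logits = tf.reshape(logits, (-1, logits.shape[-1]))  # (batch * seq_len, num_classes)
kept_logits = tf.boolean_mask(flat_logits, active)        # keep only real tokens
kept_labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active)

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
print(loss_fn(kept_labels, kept_logits).numpy())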
383
410
"""Tensorflow trainer class.""" import logging import math import os from typing import Callable, Dict, Optional import numpy as np import tensorflow as tf from .modeling_tf_utils import TFPreTrainedModel, shape_list from .optimization_tf import GradientAccumulator, create_optimizer from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput from .training_args_tf import TFTrainingArguments logger = logging.getLogger(__name__) class TFTrainer: model: TFPreTrainedModel args: TFTrainingArguments # something similar to a PT Dataset. # This is just temporary before to have # a framework-agnostic approach for datasets. train_dataset: Optional[tf.data.Dataset] eval_dataset: Optional[tf.data.Dataset] compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None prediction_loss_only: bool def __init__( self, model: TFPreTrainedModel, args: TFTrainingArguments, train_dataset: Optional[tf.data.Dataset] = None, eval_dataset: Optional[tf.data.Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, prediction_loss_only=False, ): self.model = model self.args = args self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.compute_metrics = compute_metrics self.prediction_loss_only = prediction_loss_only self.gradient_accumulator = GradientAccumulator() self._setup_training() def _setup_training(self) -> None: """ Setup the different steps to train a model: - check if all the data are given - create the proper strategy - create the features - prepare the model settings """ self._prepare_dataset() with self.args.strategy.scope(): self._create_optimizer() _ = self.optimizer.iterations self._set_loss_and_metric() self._create_checkpoint_manager() self._create_summary_writer() def _set_loss_and_metric(self) -> None: """ Create the training loss and metric with their name. Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ try: self.loss = tf.keras.losses.get( { "class_name": self.args.loss_name, "config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE}, } ) except TypeError: self.loss = tf.keras.losses.get( {"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}} ) def _create_summary_writer(self) -> None: """ Create a summary writer to be able to read the logs in Tensorboard. """ self.writer = tf.summary.create_file_writer(self.args.logging_dir) def _prepare_dataset(self) -> None: """ Prepare the training, validation and test data. """ if self.train_dataset is not None: self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy() if self.args.max_steps > 0: self.train_steps = self.args.max_steps else: self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size) self.train_dataset = ( self.train_dataset.cache() .shuffle(self.num_train_examples) .batch(self.args.train_batch_size) .prefetch(tf.data.experimental.AUTOTUNE) ) if self.args.max_steps > 0: self.train_dataset = self.train_dataset.repeat(-1) self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset) else: self.train_steps = 0 if self.eval_dataset is not None: self.eval_dataset = ( self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE) ) self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset) def _create_optimizer(self) -> None: """ Create the training optimizer with its name. 
Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ if self.args.optimizer_name == "adamw": self.optimizer = create_optimizer( self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr ) else: try: self.optimizer = tf.keras.optimizers.get( { "class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon}, } ) except TypeError: # This is for the case where the optimizer is not Adam-like such as SGD self.optimizer = tf.keras.optimizers.get( {"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}} ) logger.info("Created an/a {} optimizer".format(self.args.optimizer_name)) def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None: """ Create a checkpoint manager in order to be able to make the training fault-tolerant. Args: max_to_keep: the maximum number of checkpoints to keep in the checkpoint path. load_model: if we want to start the training from the latest checkpoint. """ ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model) self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep) if load_model: ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial() @tf.function def _evaluate_steps(self, per_replica_features, per_replica_labels): """ One step evaluation across replica. Args: per_replica_features: the batched features. per_replica_labels: the batched labels. Returns: The loss corresponding to the given batch. """ per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2( self._run_model, args=(per_replica_features, per_replica_labels, False) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss, per_replica_logits def _prediction_loop( self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None ) -> PredictionOutput: logger.info("***** Running %s *****", description) logger.info(" Batch size = %d", self.args.eval_batch_size) label_ids: np.ndarray = None preds: np.ndarray = None step: int = 1 for features, labels in dataset: step = tf.convert_to_tensor(step, dtype=tf.int64) loss, logits = self._evaluate_steps(features, labels) loss = tf.reduce_mean(loss) if not prediction_loss_only: if self.args.n_gpu > 1: for val in logits.values: if preds is None: preds = val.numpy() else: preds = np.append(preds, val.numpy(), axis=0) for val in labels.values: if label_ids is None: label_ids = val.numpy() else: label_ids = np.append(label_ids, val.numpy(), axis=0) else: if preds is None: preds = logits.numpy() else: preds = np.append(preds, logits.numpy(), axis=0) if label_ids is None: label_ids = labels.numpy() else: label_ids = np.append(label_ids, labels.numpy(), axis=0) step += 1 if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} metrics["eval_loss"] = loss.numpy() for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def evaluate( self, eval_dataset: Optional[tf.data.Dataset] = None, 
prediction_loss_only: Optional[bool] = None ) -> Dict[str, float]: """ Prediction/evaluation loop, shared by `evaluate()` and `predict()`. """ if eval_dataset is None: eval_dataset = self.eval_dataset output = self._prediction_loop(eval_dataset, description="Evaluation") return output.metrics def train(self) -> None: """ Train method to train the model. """ if self.args.debug: tf.summary.trace_on(graph=True, profiler=True) self.gradient_accumulator.reset() iterations = self.optimizer.iterations if iterations.numpy() > 0: logger.info("Start the training from the last checkpoint") start_epoch = (iterations.numpy() // self.train_steps) + 1 else: start_epoch = 1 tf.summary.experimental.set_step(iterations) epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs logger.info("***** Running training *****") logger.info(" Num examples = %d", self.num_train_examples) logger.info(" Num Epochs = %d", epochs) logger.info(" Total optimization steps = %d", self.train_steps) for epoch in range(start_epoch, int(epochs + 1)): for training_loss in self._training_steps(): step = iterations.numpy() if self.args.debug: with self.writer.as_default(): tf.summary.scalar("loss", training_loss, step=step) if step == 1 and self.args.debug: with self.writer.as_default(): tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir) if self.args.evaluate_during_training and step % self.args.eval_steps == 0: logs = {} results = self.evaluate() for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value if callable(self.optimizer.learning_rate): logs["learning_rate"] = self.optimizer.learning_rate(step).numpy() else: logs["learning_rate"] = self.optimizer.learning_rate.numpy() logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs)) with self.writer.as_default(): for k, v in logs.items(): tf.summary.scalar(k, v, step=step) if step % self.args.logging_steps == 0: logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy())) if step % self.args.save_steps == 0: ckpt_save_path = self.model.ckpt_manager.save() logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path)) if step % self.train_steps == 0: break def _training_steps(self): """ Returns a generator over training steps (i.e. parameters update). 
""" for i, loss in enumerate(self._accumulate_next_gradients()): if i % self.args.gradient_accumulation_steps == 0: self._apply_gradients() yield loss @tf.function def _apply_gradients(self): """Applies the gradients (cross-replica).""" self.args.strategy.experimental_run_v2(self._step) def _step(self): """Applies gradients and resets accumulation.""" gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync gradients = [ gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients ] gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients] self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables))) self.gradient_accumulator.reset() def _accumulate_next_gradients(self): """Accumulates the gradients from the next element in dataset.""" iterator = iter(self.train_dataset) @tf.function def _accumulate_next(): per_replica_features, per_replica_labels = next(iterator) return self._accumulate_gradients(per_replica_features, per_replica_labels) while True: try: yield _accumulate_next() except tf.errors.OutOfRangeError: break def _accumulate_gradients(self, per_replica_features, per_replica_labels): """Accumulates the gradients across all the replica.""" per_replica_loss = self.args.strategy.experimental_run_v2( self._forward, args=(per_replica_features, per_replica_labels) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss def _forward(self, features, labels): """Forwards a training example and accumulates the gradients.""" per_example_loss, _ = self._run_model(features, labels, True) gradients = tf.gradients(per_example_loss, self.model.trainable_variables) gradients = [ g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables) ] self.gradient_accumulator(gradients) return per_example_loss def _run_model(self, features, labels, training): """ Computes the loss of the given features and labels pair. Args: features: the batched features. labels: the batched labels. training: run the model in training mode or not """ if self.args.mode == "text-classification" or self.args.mode == "token-classification": logits = self.model(features, training=training)[0] else: logits = self.model(features, training=training) if self.args.mode == "token-classification": active_loss = tf.reshape(labels, (-1,)) != -1 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) loss = self.loss(labels, reduced_logits) elif self.args.mode == "question-answering": start_loss = self.loss(labels["start_position"], logits[0]) end_loss = self.loss(labels["end_position"], logits[1]) loss = (start_loss + end_loss) / 2.0 else: loss = self.loss(labels, logits) loss += sum(self.model.losses) * (1.0 / self.args.n_gpu) return loss, logits def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput: """ Run prediction and return predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in evaluate(). Args: test_dataset: something similar to a PT Dataset. This is just temporary before to have a framework-agnostic approach for datasets. 
""" test_dataset = test_dataset.batch(self.args.eval_batch_size) test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset) return self._prediction_loop(test_dataset, description="Prediction") def save_model(self) -> None: """ Save the pretrained model and create a Tensorflow saved model. """ logger.info("Saving model in {}".format(self.args.output_dir)) path = os.path.join(self.args.output_dir, "saved_model") logger.info("Saving model in {}".format(path)) os.makedirs(path, exist_ok=True) self.model.save_pretrained(self.args.output_dir)
predict
Run prediction and return predictions and potential metrics.

Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().

Args:
  test_dataset: something similar to a PT Dataset. This is just temporary before to have
    a framework-agnostic approach for datasets.
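As a purely hypothetical usage sketch (none of the names below appear in the source): once a TFTrainer is constructed, predict takes an unbatched tf.data.Dataset and returns a PredictionOutput whose predictions and metrics can be read directly.

# Hypothetical usage; `model`, `training_args`, and `test_ds` are placeholders, not from the source.
from transformers import TFTrainer

trainer = TFTrainer(model=model, args=training_args)
output = trainer.predict(test_ds)   # test_ds: tf.data.Dataset of (features, labels), unbatched

print(output.predictions.shape)     # logits gathered over the whole test set
print(output.metrics)               # e.g. {"eval_loss": ...} plus any compute_metrics results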
"""Tensorflow trainer class.""" import logging import math import os from typing import Callable, Dict, Optional import numpy as np import tensorflow as tf from .modeling_tf_utils import TFPreTrainedModel, shape_list from .optimization_tf import GradientAccumulator, create_optimizer from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput from .training_args_tf import TFTrainingArguments logger = logging.getLogger(__name__) class TFTrainer: model: TFPreTrainedModel args: TFTrainingArguments # something similar to a PT Dataset. # This is just temporary before to have # a framework-agnostic approach for datasets. train_dataset: Optional[tf.data.Dataset] eval_dataset: Optional[tf.data.Dataset] compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None prediction_loss_only: bool def __init__( self, model: TFPreTrainedModel, args: TFTrainingArguments, train_dataset: Optional[tf.data.Dataset] = None, eval_dataset: Optional[tf.data.Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, prediction_loss_only=False, ): self.model = model self.args = args self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.compute_metrics = compute_metrics self.prediction_loss_only = prediction_loss_only self.gradient_accumulator = GradientAccumulator() self._setup_training() def _setup_training(self) -> None: """ Setup the different steps to train a model: - check if all the data are given - create the proper strategy - create the features - prepare the model settings """ self._prepare_dataset() with self.args.strategy.scope(): self._create_optimizer() _ = self.optimizer.iterations self._set_loss_and_metric() self._create_checkpoint_manager() self._create_summary_writer() def _set_loss_and_metric(self) -> None: """ Create the training loss and metric with their name. Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ try: self.loss = tf.keras.losses.get( { "class_name": self.args.loss_name, "config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE}, } ) except TypeError: self.loss = tf.keras.losses.get( {"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}} ) def _create_summary_writer(self) -> None: """ Create a summary writer to be able to read the logs in Tensorboard. """ self.writer = tf.summary.create_file_writer(self.args.logging_dir) def _prepare_dataset(self) -> None: """ Prepare the training, validation and test data. """ if self.train_dataset is not None: self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy() if self.args.max_steps > 0: self.train_steps = self.args.max_steps else: self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size) self.train_dataset = ( self.train_dataset.cache() .shuffle(self.num_train_examples) .batch(self.args.train_batch_size) .prefetch(tf.data.experimental.AUTOTUNE) ) if self.args.max_steps > 0: self.train_dataset = self.train_dataset.repeat(-1) self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset) else: self.train_steps = 0 if self.eval_dataset is not None: self.eval_dataset = ( self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE) ) self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset) def _create_optimizer(self) -> None: """ Create the training optimizer with its name. 
Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ if self.args.optimizer_name == "adamw": self.optimizer = create_optimizer( self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr ) else: try: self.optimizer = tf.keras.optimizers.get( { "class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon}, } ) except TypeError: # This is for the case where the optimizer is not Adam-like such as SGD self.optimizer = tf.keras.optimizers.get( {"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}} ) logger.info("Created an/a {} optimizer".format(self.args.optimizer_name)) def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None: """ Create a checkpoint manager in order to be able to make the training fault-tolerant. Args: max_to_keep: the maximum number of checkpoints to keep in the checkpoint path. load_model: if we want to start the training from the latest checkpoint. """ ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model) self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep) if load_model: ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial() @tf.function def _evaluate_steps(self, per_replica_features, per_replica_labels): """ One step evaluation across replica. Args: per_replica_features: the batched features. per_replica_labels: the batched labels. Returns: The loss corresponding to the given batch. """ per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2( self._run_model, args=(per_replica_features, per_replica_labels, False) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss, per_replica_logits def _prediction_loop( self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None ) -> PredictionOutput: logger.info("***** Running %s *****", description) logger.info(" Batch size = %d", self.args.eval_batch_size) label_ids: np.ndarray = None preds: np.ndarray = None step: int = 1 for features, labels in dataset: step = tf.convert_to_tensor(step, dtype=tf.int64) loss, logits = self._evaluate_steps(features, labels) loss = tf.reduce_mean(loss) if not prediction_loss_only: if self.args.n_gpu > 1: for val in logits.values: if preds is None: preds = val.numpy() else: preds = np.append(preds, val.numpy(), axis=0) for val in labels.values: if label_ids is None: label_ids = val.numpy() else: label_ids = np.append(label_ids, val.numpy(), axis=0) else: if preds is None: preds = logits.numpy() else: preds = np.append(preds, logits.numpy(), axis=0) if label_ids is None: label_ids = labels.numpy() else: label_ids = np.append(label_ids, labels.numpy(), axis=0) step += 1 if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} metrics["eval_loss"] = loss.numpy() for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def evaluate( self, eval_dataset: Optional[tf.data.Dataset] = None, 
prediction_loss_only: Optional[bool] = None ) -> Dict[str, float]: """ Prediction/evaluation loop, shared by `evaluate()` and `predict()`. """ if eval_dataset is None: eval_dataset = self.eval_dataset output = self._prediction_loop(eval_dataset, description="Evaluation") return output.metrics def train(self) -> None: """ Train method to train the model. """ if self.args.debug: tf.summary.trace_on(graph=True, profiler=True) self.gradient_accumulator.reset() iterations = self.optimizer.iterations if iterations.numpy() > 0: logger.info("Start the training from the last checkpoint") start_epoch = (iterations.numpy() // self.train_steps) + 1 else: start_epoch = 1 tf.summary.experimental.set_step(iterations) epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs logger.info("***** Running training *****") logger.info(" Num examples = %d", self.num_train_examples) logger.info(" Num Epochs = %d", epochs) logger.info(" Total optimization steps = %d", self.train_steps) for epoch in range(start_epoch, int(epochs + 1)): for training_loss in self._training_steps(): step = iterations.numpy() if self.args.debug: with self.writer.as_default(): tf.summary.scalar("loss", training_loss, step=step) if step == 1 and self.args.debug: with self.writer.as_default(): tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir) if self.args.evaluate_during_training and step % self.args.eval_steps == 0: logs = {} results = self.evaluate() for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value if callable(self.optimizer.learning_rate): logs["learning_rate"] = self.optimizer.learning_rate(step).numpy() else: logs["learning_rate"] = self.optimizer.learning_rate.numpy() logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs)) with self.writer.as_default(): for k, v in logs.items(): tf.summary.scalar(k, v, step=step) if step % self.args.logging_steps == 0: logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy())) if step % self.args.save_steps == 0: ckpt_save_path = self.model.ckpt_manager.save() logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path)) if step % self.train_steps == 0: break def _training_steps(self): """ Returns a generator over training steps (i.e. parameters update). 
""" for i, loss in enumerate(self._accumulate_next_gradients()): if i % self.args.gradient_accumulation_steps == 0: self._apply_gradients() yield loss @tf.function def _apply_gradients(self): """Applies the gradients (cross-replica).""" self.args.strategy.experimental_run_v2(self._step) def _step(self): """Applies gradients and resets accumulation.""" gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync gradients = [ gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients ] gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients] self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables))) self.gradient_accumulator.reset() def _accumulate_next_gradients(self): """Accumulates the gradients from the next element in dataset.""" iterator = iter(self.train_dataset) @tf.function def _accumulate_next(): per_replica_features, per_replica_labels = next(iterator) return self._accumulate_gradients(per_replica_features, per_replica_labels) while True: try: yield _accumulate_next() except tf.errors.OutOfRangeError: break def _accumulate_gradients(self, per_replica_features, per_replica_labels): """Accumulates the gradients across all the replica.""" per_replica_loss = self.args.strategy.experimental_run_v2( self._forward, args=(per_replica_features, per_replica_labels) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss def _forward(self, features, labels): """Forwards a training example and accumulates the gradients.""" per_example_loss, _ = self._run_model(features, labels, True) gradients = tf.gradients(per_example_loss, self.model.trainable_variables) gradients = [ g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables) ] self.gradient_accumulator(gradients) return per_example_loss def _run_model(self, features, labels, training): """ Computes the loss of the given features and labels pair. Args: features: the batched features. labels: the batched labels. training: run the model in training mode or not """ if self.args.mode == "text-classification" or self.args.mode == "token-classification": logits = self.model(features, training=training)[0] else: logits = self.model(features, training=training) if self.args.mode == "token-classification": active_loss = tf.reshape(labels, (-1,)) != -1 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) loss = self.loss(labels, reduced_logits) elif self.args.mode == "question-answering": start_loss = self.loss(labels["start_position"], logits[0]) end_loss = self.loss(labels["end_position"], logits[1]) loss = (start_loss + end_loss) / 2.0 else: loss = self.loss(labels, logits) loss += sum(self.model.losses) * (1.0 / self.args.n_gpu) return loss, logits # MASKED: predict function (lines 412-424) def save_model(self) -> None: """ Save the pretrained model and create a Tensorflow saved model. """ logger.info("Saving model in {}".format(self.args.output_dir)) path = os.path.join(self.args.output_dir, "saved_model") logger.info("Saving model in {}".format(path)) os.makedirs(path, exist_ok=True) self.model.save_pretrained(self.args.output_dir)
    def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
        """
        Run prediction and return predictions and potential metrics.
        Depending on the dataset and your use case, your test dataset may contain labels.
        In that case, this method will also return metrics, like in evaluate().
        Args:
          test_dataset: something similar to a PT Dataset. This is just temporary before to have
            a framework-agnostic approach for datasets.
        """
        test_dataset = test_dataset.batch(self.args.eval_batch_size)
        test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset)

        return self._prediction_loop(test_dataset, description="Prediction")
412
424
"""Tensorflow trainer class.""" import logging import math import os from typing import Callable, Dict, Optional import numpy as np import tensorflow as tf from .modeling_tf_utils import TFPreTrainedModel, shape_list from .optimization_tf import GradientAccumulator, create_optimizer from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput from .training_args_tf import TFTrainingArguments logger = logging.getLogger(__name__) class TFTrainer: model: TFPreTrainedModel args: TFTrainingArguments # something similar to a PT Dataset. # This is just temporary before to have # a framework-agnostic approach for datasets. train_dataset: Optional[tf.data.Dataset] eval_dataset: Optional[tf.data.Dataset] compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None prediction_loss_only: bool def __init__( self, model: TFPreTrainedModel, args: TFTrainingArguments, train_dataset: Optional[tf.data.Dataset] = None, eval_dataset: Optional[tf.data.Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, prediction_loss_only=False, ): self.model = model self.args = args self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.compute_metrics = compute_metrics self.prediction_loss_only = prediction_loss_only self.gradient_accumulator = GradientAccumulator() self._setup_training() def _setup_training(self) -> None: """ Setup the different steps to train a model: - check if all the data are given - create the proper strategy - create the features - prepare the model settings """ self._prepare_dataset() with self.args.strategy.scope(): self._create_optimizer() _ = self.optimizer.iterations self._set_loss_and_metric() self._create_checkpoint_manager() self._create_summary_writer() def _set_loss_and_metric(self) -> None: """ Create the training loss and metric with their name. Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ try: self.loss = tf.keras.losses.get( { "class_name": self.args.loss_name, "config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE}, } ) except TypeError: self.loss = tf.keras.losses.get( {"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}} ) def _create_summary_writer(self) -> None: """ Create a summary writer to be able to read the logs in Tensorboard. """ self.writer = tf.summary.create_file_writer(self.args.logging_dir) def _prepare_dataset(self) -> None: """ Prepare the training, validation and test data. """ if self.train_dataset is not None: self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy() if self.args.max_steps > 0: self.train_steps = self.args.max_steps else: self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size) self.train_dataset = ( self.train_dataset.cache() .shuffle(self.num_train_examples) .batch(self.args.train_batch_size) .prefetch(tf.data.experimental.AUTOTUNE) ) if self.args.max_steps > 0: self.train_dataset = self.train_dataset.repeat(-1) self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset) else: self.train_steps = 0 if self.eval_dataset is not None: self.eval_dataset = ( self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE) ) self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset) def _create_optimizer(self) -> None: """ Create the training optimizer with its name. 
Allowed names are those listed in the Tensorflow documentation and those contained in the transformers library. """ if self.args.optimizer_name == "adamw": self.optimizer = create_optimizer( self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr ) else: try: self.optimizer = tf.keras.optimizers.get( { "class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon}, } ) except TypeError: # This is for the case where the optimizer is not Adam-like such as SGD self.optimizer = tf.keras.optimizers.get( {"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}} ) logger.info("Created an/a {} optimizer".format(self.args.optimizer_name)) def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None: """ Create a checkpoint manager in order to be able to make the training fault-tolerant. Args: max_to_keep: the maximum number of checkpoints to keep in the checkpoint path. load_model: if we want to start the training from the latest checkpoint. """ ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model) self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep) if load_model: ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial() @tf.function def _evaluate_steps(self, per_replica_features, per_replica_labels): """ One step evaluation across replica. Args: per_replica_features: the batched features. per_replica_labels: the batched labels. Returns: The loss corresponding to the given batch. """ per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2( self._run_model, args=(per_replica_features, per_replica_labels, False) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss, per_replica_logits def _prediction_loop( self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None ) -> PredictionOutput: logger.info("***** Running %s *****", description) logger.info(" Batch size = %d", self.args.eval_batch_size) label_ids: np.ndarray = None preds: np.ndarray = None step: int = 1 for features, labels in dataset: step = tf.convert_to_tensor(step, dtype=tf.int64) loss, logits = self._evaluate_steps(features, labels) loss = tf.reduce_mean(loss) if not prediction_loss_only: if self.args.n_gpu > 1: for val in logits.values: if preds is None: preds = val.numpy() else: preds = np.append(preds, val.numpy(), axis=0) for val in labels.values: if label_ids is None: label_ids = val.numpy() else: label_ids = np.append(label_ids, val.numpy(), axis=0) else: if preds is None: preds = logits.numpy() else: preds = np.append(preds, logits.numpy(), axis=0) if label_ids is None: label_ids = labels.numpy() else: label_ids = np.append(label_ids, labels.numpy(), axis=0) step += 1 if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} metrics["eval_loss"] = loss.numpy() for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def evaluate( self, eval_dataset: Optional[tf.data.Dataset] = None, 
prediction_loss_only: Optional[bool] = None ) -> Dict[str, float]: """ Prediction/evaluation loop, shared by `evaluate()` and `predict()`. """ if eval_dataset is None: eval_dataset = self.eval_dataset output = self._prediction_loop(eval_dataset, description="Evaluation") return output.metrics def train(self) -> None: """ Train method to train the model. """ if self.args.debug: tf.summary.trace_on(graph=True, profiler=True) self.gradient_accumulator.reset() iterations = self.optimizer.iterations if iterations.numpy() > 0: logger.info("Start the training from the last checkpoint") start_epoch = (iterations.numpy() // self.train_steps) + 1 else: start_epoch = 1 tf.summary.experimental.set_step(iterations) epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs logger.info("***** Running training *****") logger.info(" Num examples = %d", self.num_train_examples) logger.info(" Num Epochs = %d", epochs) logger.info(" Total optimization steps = %d", self.train_steps) for epoch in range(start_epoch, int(epochs + 1)): for training_loss in self._training_steps(): step = iterations.numpy() if self.args.debug: with self.writer.as_default(): tf.summary.scalar("loss", training_loss, step=step) if step == 1 and self.args.debug: with self.writer.as_default(): tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir) if self.args.evaluate_during_training and step % self.args.eval_steps == 0: logs = {} results = self.evaluate() for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value if callable(self.optimizer.learning_rate): logs["learning_rate"] = self.optimizer.learning_rate(step).numpy() else: logs["learning_rate"] = self.optimizer.learning_rate.numpy() logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs)) with self.writer.as_default(): for k, v in logs.items(): tf.summary.scalar(k, v, step=step) if step % self.args.logging_steps == 0: logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy())) if step % self.args.save_steps == 0: ckpt_save_path = self.model.ckpt_manager.save() logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path)) if step % self.train_steps == 0: break def _training_steps(self): """ Returns a generator over training steps (i.e. parameters update). 
""" for i, loss in enumerate(self._accumulate_next_gradients()): if i % self.args.gradient_accumulation_steps == 0: self._apply_gradients() yield loss @tf.function def _apply_gradients(self): """Applies the gradients (cross-replica).""" self.args.strategy.experimental_run_v2(self._step) def _step(self): """Applies gradients and resets accumulation.""" gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync gradients = [ gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients ] gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients] self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables))) self.gradient_accumulator.reset() def _accumulate_next_gradients(self): """Accumulates the gradients from the next element in dataset.""" iterator = iter(self.train_dataset) @tf.function def _accumulate_next(): per_replica_features, per_replica_labels = next(iterator) return self._accumulate_gradients(per_replica_features, per_replica_labels) while True: try: yield _accumulate_next() except tf.errors.OutOfRangeError: break def _accumulate_gradients(self, per_replica_features, per_replica_labels): """Accumulates the gradients across all the replica.""" per_replica_loss = self.args.strategy.experimental_run_v2( self._forward, args=(per_replica_features, per_replica_labels) ) try: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0) except ValueError: reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None) return reduced_loss def _forward(self, features, labels): """Forwards a training example and accumulates the gradients.""" per_example_loss, _ = self._run_model(features, labels, True) gradients = tf.gradients(per_example_loss, self.model.trainable_variables) gradients = [ g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables) ] self.gradient_accumulator(gradients) return per_example_loss def _run_model(self, features, labels, training): """ Computes the loss of the given features and labels pair. Args: features: the batched features. labels: the batched labels. training: run the model in training mode or not """ if self.args.mode == "text-classification" or self.args.mode == "token-classification": logits = self.model(features, training=training)[0] else: logits = self.model(features, training=training) if self.args.mode == "token-classification": active_loss = tf.reshape(labels, (-1,)) != -1 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) loss = self.loss(labels, reduced_logits) elif self.args.mode == "question-answering": start_loss = self.loss(labels["start_position"], logits[0]) end_loss = self.loss(labels["end_position"], logits[1]) loss = (start_loss + end_loss) / 2.0 else: loss = self.loss(labels, logits) loss += sum(self.model.losses) * (1.0 / self.args.n_gpu) return loss, logits def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput: """ Run prediction and return predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in evaluate(). Args: test_dataset: something similar to a PT Dataset. This is just temporary before to have a framework-agnostic approach for datasets. 
""" test_dataset = test_dataset.batch(self.args.eval_batch_size) test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset) return self._prediction_loop(test_dataset, description="Prediction") def save_model(self) -> None: """ Save the pretrained model and create a Tensorflow saved model. """ logger.info("Saving model in {}".format(self.args.output_dir)) path = os.path.join(self.args.output_dir, "saved_model") logger.info("Saving model in {}".format(path)) os.makedirs(path, exist_ok=True) self.model.save_pretrained(self.args.output_dir)
create_tenant
Creates a new tenant.

Note this route only works when run against Prefect Server.

Args:
    - name (str): the name of the tenant to create
    - slug (str, optional): the slug of the tenant to create; defaults to name

Returns:
    - str: the ID of the newly created tenant, or the ID of the currently active tenant

Raises:
    - ValueError: if run against Prefect Cloud
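The body of create_tenant is masked in the row below; the following is only a rough sketch consistent with this docstring. The guard on the cloud backend, the mutation name, its input fields, and the slug handling are assumptions for illustration, not the actual Prefect source.

# Hedged sketch only; the real create_tenant body is masked, and the GraphQL field names are guesses.
def create_tenant(self, name: str, slug: str = None) -> str:
    if prefect.config.backend == "cloud":
        # Per the docstring, this route only works when run against Prefect Server.
        raise ValueError("This route only works against Prefect Server, not Prefect Cloud.")

    if slug is None:
        slug = slugify(name)

    tenant_info = self.graphql(
        {
            "mutation($input: create_tenant_input!)": {
                "create_tenant(input: $input)": {"id"}
            }
        },
        variables=dict(input=dict(name=name, slug=slug)),
    )
    return tenant_info.data.create_tenant.id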
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id # MASKED: create_tenant function (lines 128-159) # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. 
- retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of 
request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
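Below is a minimal usage sketch (not part of the dataset row itself) illustrating how the Client methods documented in the code above — `graphql`, `create_flow_run`, and `get_flow_run_info` — might be called together; the flow ID and the query shape are hypothetical placeholders, and a reachable Prefect API with valid auth is assumed.

```python
from prefect import Client

client = Client()  # picks up API server and token from the Prefect config/context

# Ad-hoc GraphQL query via the convenience wrapper (query shape is illustrative)
result = client.graphql({"query": {"flow": {"id", "name"}}})
print(result.data.flow)

# Schedule a run of an existing flow; the UUID below is a placeholder
flow_run_id = client.create_flow_run(
    flow_id="00000000-0000-0000-0000-000000000000",
    run_name="example-run",
)

# Retrieve version and state information for the new run
info = client.get_flow_run_info(flow_run_id)
print(info.state)
```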
def create_tenant(self, name: str, slug: str = None) -> str:
    """
    Creates a new tenant. Note this route only works when run against Prefect Server.

    Args:
        - name (str): the name of the tenant to create
        - slug (str, optional): the slug of the tenant to create; defaults to name

    Returns:
        - str: the ID of the newly created tenant, or the ID of the currently active tenant

    Raises:
        - ValueError: if run against Prefect Cloud
    """
    if prefect.config.backend != "server":
        msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/"
        raise ValueError(msg)

    if slug is None:
        slug = slugify(name)
    tenant_info = self.graphql(
        {
            "mutation($input: create_tenant_input!)": {
                "create_tenant(input: $input)": {"id"}
            }
        },
        variables=dict(input=dict(name=name, slug=slug)),
    )
    return tenant_info.data.create_tenant.id
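A short, hedged example of calling the implementation above: it assumes the backend is configured as `"server"` (for example, a local Prefect Server deployment); against Prefect Cloud the call raises `ValueError`, as documented.

```python
from prefect import Client

client = Client()

# Only valid against Prefect Server; the slug defaults to slugify(name)
tenant_id = client.create_tenant(name="My Team")
print(tenant_id)
```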
128
159
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
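The block above documents the `Client` methods for creating projects and flow runs and for reading run state. A minimal usage sketch, assuming a configured and reachable Prefect backend; the project name and flow ID below are purely illustrative:

```python
from prefect import Client

client = Client()

# Create a project to hold flows (the name is illustrative)
project_id = client.create_project(project_name="example-project")

# Schedule a run of an already-registered flow; this flow ID is a placeholder
flow_run_id = client.create_flow_run(
    flow_id="00000000-0000-0000-0000-000000000000",
    run_name="ad-hoc-run",
)

# Read back the run's current state and build a direct UI link to it
state = client.get_flow_run_state(flow_run_id)
print(state, client.get_cloud_url("flow-run", flow_run_id))
```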
get
Convenience function for calling the Prefect API with token auth and GET request

Args:
    - path (str): the path of the API url. For example, to GET
        http://prefect-server/v1/auth/login, path would be 'auth/login'.
    - server (str, optional): the server to send the GET request to;
        defaults to `self.api_server`
    - headers (dict, optional): Headers to pass with the request
    - params (dict): GET parameters
    - token (str): an auth token. If not supplied, the `client.access_token` is used.
    - retry_on_api_error (bool): whether the operation should be retried if the API returns
        an API_ERROR code

Returns:
    - dict: Dictionary representation of the request made
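The docstring above describes the `get` convenience wrapper around the token-authenticated HTTP layer. A minimal calling sketch, assuming a reachable API server; `auth/login` is the example path the docstring itself uses:

```python
from prefect import Client

client = Client()

# GET <api_server>/auth/login; returns the parsed JSON body,
# or {} when the response carries no text
result = client.get("auth/login")
print(result)
```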
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities # MASKED: get function (lines 164-202) def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. 
- retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of 
request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
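The tag-limit methods documented above require tenant admin permissions. A minimal sketch of setting and reading a task tag concurrency limit, assuming a configured backend; the tag name is illustrative:

```python
from prefect import Client

client = Client()

# Cap concurrent runs of tasks tagged "database" at 5 (tenant admin only)
client.update_task_tag_limit(tag="database", limit=5)

# Read the limit back; returns None when no limit is set for the tag
print(client.get_task_tag_limit("database"))
```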
def get(
    self,
    path: str,
    server: str = None,
    headers: dict = None,
    params: Dict[str, JSONLike] = None,
    token: str = None,
    retry_on_api_error: bool = True,
) -> dict:
    """
    Convenience function for calling the Prefect API with token auth and GET request

    Args:
        - path (str): the path of the API url. For example, to GET
            http://prefect-server/v1/auth/login, path would be 'auth/login'.
        - server (str, optional): the server to send the GET request to;
            defaults to `self.api_server`
        - headers (dict, optional): Headers to pass with the request
        - params (dict): GET parameters
        - token (str): an auth token. If not supplied, the `client.access_token` is used.
        - retry_on_api_error (bool): whether the operation should be retried if the API returns
            an API_ERROR code

    Returns:
        - dict: Dictionary representation of the request made
    """
    response = self._request(
        method="GET",
        path=path,
        params=params,
        server=server,
        headers=headers,
        token=token,
        retry_on_api_error=retry_on_api_error,
    )
    if response.text:
        return response.json()
    else:
        return {}
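The implementation above returns `{}` when the response body is empty and otherwise parses the JSON body. A minimal sketch that exercises both branches without any network access, using a stand-in object in place of a real `Client` instance (the canned payloads are illustrative):

```python
from unittest.mock import MagicMock

from prefect import Client

# Stand-in for a Client instance whose _request returns a canned response
fake_client = MagicMock()

# Empty body -> get() falls back to {}
fake_client._request.return_value = MagicMock(text="")
assert Client.get(fake_client, "any/path") == {}

# Non-empty body -> get() returns the parsed JSON
fake_client._request.return_value = MagicMock(
    text='{"ok": true}', json=lambda: {"ok": True}
)
assert Client.get(fake_client, "any/path") == {"ok": True}
```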
164
202
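The `164` and `202` values above are the start and end lines of the masked `get` function in the full source that follows, and the masked variant marks that span with a `# MASKED: get function (lines 164-202)` comment. A minimal sketch of splicing the implementation back in by that marker; the exact reconstruction convention is an assumption, and re-indentation of the spliced body is ignored here:

```python
def splice(masked_code: str, implementation: str, function_name: str) -> str:
    """Replace the '# MASKED: <name> function ...' marker with the implementation."""
    rebuilt = []
    for line in masked_code.splitlines():
        if line.strip().startswith(f"# MASKED: {function_name} function"):
            rebuilt.extend(implementation.splitlines())
        else:
            rebuilt.append(line)
    return "\n".join(rebuilt)
```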
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
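The file above is the complete `Client` module for this row. A minimal usage sketch of its public API follows, assuming a reachable Prefect backend that the client is already configured for; the flow ID below is a placeholder, not a value taken from this file:

```python
from prefect import Client

# the API server and token fall back to the prefect config / local settings,
# as described in the Client docstring above
client = Client()

# run an ad-hoc GraphQL query (parsed by prefect.utilities.graphql.parse_graphql)
tenants = client.graphql({"query": {"tenant": {"id"}}})

# schedule a run for an already-registered flow and inspect its state;
# the flow ID here is a placeholder
flow_run_id = client.create_flow_run(flow_id="00000000-0000-0000-0000-000000000000")
info = client.get_flow_run_info(flow_run_id)
print(info.state)
```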
post
Convenience function for calling the Prefect API with token auth via a POST request

Args:
    - path (str): the path of the API url. For example, to POST
        http://prefect-server/v1/auth/login, path would be 'auth/login'.
    - server (str, optional): the server to send the POST request to;
        defaults to `self.api_server`
    - headers (dict, optional): headers to pass with the request
    - params (dict): POST parameters
    - token (str): an auth token. If not supplied, the `client.access_token` is used.
    - retry_on_api_error (bool): whether the operation should be retried if the
        API returns an API_ERROR code

Returns:
    - dict: Dictionary representation of the request made
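Judging from the unmasked file content above, the masked `post` method is a thin wrapper around `_request`. A sketch consistent with that source follows (docstring omitted; the surrounding `Client` class, its imports, and the `JSONLike` alias are assumed from the module above):

```python
def post(
    self,
    path: str,
    server: str = None,
    headers: dict = None,
    params: Dict[str, JSONLike] = None,
    token: str = None,
    retry_on_api_error: bool = True,
) -> dict:
    # delegate to the shared request helper with the POST verb
    response = self._request(
        method="POST",
        path=path,
        params=params,
        server=server,
        headers=headers,
        token=token,
        retry_on_api_error=retry_on_api_error,
    )
    # an empty response body is returned as an empty dict rather than raising
    if response.text:
        return response.json()
    else:
        return {}
```

The empty-body check mirrors the `get` helper above, so callers can always treat the return value as a dictionary.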
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. 
- retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} # MASKED: post function (lines 204-242) def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server 
Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
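The `register_agent` and `get_agent_config` methods that close out the block above are the agent-facing surface of this client. A minimal sketch of how an agent process might call them is shown below; the agent type, labels, name, and config ID are illustrative placeholders rather than values taken from the source.

```python
from prefect import Client

client = Client()

# Register this process as an agent with the backend API.
# "local", the name, and the labels are hypothetical example values.
agent_id = client.register_agent(
    agent_type="local",
    name="example-agent",
    labels=["dev"],
)

# If the agent was started with an agent config ID, pull its settings.
# The UUID below is a placeholder, not a real config ID.
settings = client.get_agent_config("00000000-0000-0000-0000-000000000000")
print(agent_id, settings)
```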
def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {}
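The `post` implementation above is a thin wrapper around `_request`, and `Client.graphql()` routes through it. It can also be used directly for raw API calls; a rough usage sketch, assuming a reachable API and an illustrative query document, follows.

```python
from prefect import Client

client = Client()

# POST a raw GraphQL document through the same convenience wrapper
# that Client.graphql() uses internally; the query is illustrative only.
result = client.post(
    path="",
    params={"query": "{ hello }", "variables": "{}"},
)
print(result)
```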
204
242
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
get_auth_token
Returns an auth token:
    - if no explicit access token is stored, returns the api token
    - if there is an access token:
        - if there's a refresh token and the access token expires in the next 30 seconds,
            then we refresh the access token and store the result
        - return the access token

Returns:
    - str: the access token
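The refresh behavior this docstring describes can be captured as a small standalone check. The sketch below is illustrative only; the helper name `needs_refresh` and its arguments are assumptions, not part of the dataset row, and it simply mirrors the condition the masked implementation is expected to apply.

```python
import pendulum

def needs_refresh(refresh_token, expires_at):
    # Illustrative only: refresh when a refresh token exists and the access
    # token expires within the next 30 seconds, as the docstring describes.
    expiration = expires_at or pendulum.now()
    return bool(refresh_token) and pendulum.now().add(seconds=30) > expiration
```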
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) # MASKED: get_auth_token function (lines 486-505) def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
def get_auth_token(self) -> str:
    """
    Returns an auth token:
        - if no explicit access token is stored, returns the api token
        - if there is an access token:
            - if there's a refresh token and the access token expires in the next 30 seconds,
                then we refresh the access token and store the result
            - return the access token

    Returns:
        - str: the access token
    """
    if not self._access_token:
        return self._api_token

    expiration = self._access_token_expires_at or pendulum.now()
    if self._refresh_token and pendulum.now().add(seconds=30) > expiration:
        self._refresh_access_token()

    return self._access_token
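As a usage note, here is a minimal sketch of how a caller might exercise this method. It is not part of the dataset row: it assumes a Prefect Cloud API token is already configured and uses the `from prefect import Client` import shown elsewhere in this file.

```python
# Hypothetical usage sketch: retrieve an auth token and attach it to a
# request header, assuming an API token is configured for the client.
from prefect import Client

client = Client()                 # reads api_server / api_token from config or local settings
token = client.get_auth_token()   # API token, or a refreshed JWT access token when one is stored
headers = {"Authorization": "Bearer {}".format(token)}
```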
486
505
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
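The file content above closes out the full definition of the Prefect `Client` class. As a quick orientation for how its methods fit together, here is a minimal, hypothetical usage sketch; the flow, project name, and run name are illustrative placeholders, and it assumes a reachable Prefect backend with an API token already configured:

```python
# Hypothetical end-to-end sketch using the Client methods documented above.
from prefect import Client, Flow, task


@task
def say_hello():
    print("hello")


with Flow("example-flow") as flow:  # placeholder flow
    say_hello()

client = Client()  # api_server / api_token fall back to the Prefect config if omitted

# create_project returns the ID of the newly created project
client.create_project(project_name="example-project")

# register serializes the flow and returns the registered flow's ID
flow_id = client.register(flow, project_name="example-project")

# create_flow_run schedules a run to start immediately and returns the flow run ID
flow_run_id = client.create_flow_run(flow_id=flow_id, run_name="manual-run")

# get_flow_run_info returns a FlowRunInfoResult namedtuple, including the current state
info = client.get_flow_run_info(flow_run_id)
print(info.state)
```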
_refresh_access_token
Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds
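A sketch of the body hidden behind the `# MASKED: _refresh_access_token function` marker in the masked code below; it mirrors the `_refresh_access_token` method as it appears in the full file content earlier in this document, and is offered as a hedged reference rather than the authoritative implementation field:

```python
def _refresh_access_token(self) -> bool:
    """
    Refresh the client's JWT access token.

    NOTE: this should only be called by users who have provided a USER-scoped API token.

    Returns:
        - bool: True if the refresh succeeds
    """
    # Exchange the current access token for a fresh one, authenticating the
    # request with the refresh token instead of the (possibly expired) access token
    payload = self.graphql(
        {
            "mutation($input: refresh_token_input!)": {
                "refresh_token(input: $input)": {
                    "access_token",
                    "expires_at",
                    "refresh_token",
                }
            }
        },
        variables=dict(input=dict(access_token=self._access_token)),
        # pass the refresh token as the auth header
        token=self._refresh_token,
    )  # type: ignore

    # store the rotated credentials for subsequent requests
    self._access_token = payload.data.refresh_token.access_token  # type: ignore
    self._access_token_expires_at = pendulum.parse(  # type: ignore
        payload.data.refresh_token.expires_at  # type: ignore
    )
    self._refresh_token = payload.data.refresh_token.refresh_token  # type: ignore

    return True
```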
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) # MASKED: _refresh_access_token function (lines 609-638) # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. - compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. 
Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. 
Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. 
- run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. 
Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. 
Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. 
- cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. 
Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
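    # --- Illustrative sketch (not part of the original source): the
    # query-building pattern used throughout this client combines `with_args`
    # with a plain dict and executes it via `Client.graphql`. The flow-run ID
    # below is a hypothetical placeholder.
    #
    #     from prefect import Client
    #     from prefect.utilities.graphql import with_args
    #
    #     client = Client()
    #     query = {
    #         "query": {
    #             with_args("flow_run_by_pk", {"id": "<flow-run-id>"}): {
    #                 "name": True,
    #                 "serialized_state": True,
    #             }
    #         }
    #     }
    #     # returns a GraphQLResult; raises ClientError if the API reports errors
    #     result = client.graphql(query)
    #     print(result.data.flow_run_by_pk.name)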
def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True
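    # Note (illustrative, not from this dataset record): in the full file,
    # `get_auth_token` triggers the refresh above only when a USER-scoped
    # access token is present and due to expire within the next 30 seconds:
    #
    #     expiration = self._access_token_expires_at or pendulum.now()
    #     if self._refresh_token and pendulum.now().add(seconds=30) > expiration:
    #         self._refresh_access_token()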
609
638
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
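A minimal usage sketch of the `Client` surface defined in the file above; the flow ID and run name are illustrative placeholders, not values from this dataset record:

```python
from prefect import Client

client = Client()

# schedule an ad-hoc run of an existing flow; "<flow-id>" is a placeholder
flow_run_id = client.create_flow_run(flow_id="<flow-id>", run_name="ad-hoc run")

# poll the run's current state (a prefect.engine.state.State instance)
state = client.get_flow_run_state(flow_run_id)
print(state)
```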
create_project
Create a new Project

Args:
    - project_name (str): the name of the project to create
    - project_description (str, optional): the project description

Returns:
    - str: the ID of the newly-created project

Raises:
    - ClientError: if the project creation failed
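A hedged usage sketch for the documented target function; the project name and description are illustrative, not taken from the record:

```python
from prefect import Client

client = Client()
project_id = client.create_project(
    project_name="etl-pipelines",
    project_description="Nightly ETL flows",
)
print(project_id)  # ID of the newly-created project
```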
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug # MASKED: create_project function (lines 869-900) def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. 
Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. 
Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. 
Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. 
- cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. 
Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
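The masked `create_project` function has to follow the same convention every other method in this client uses: express the query or mutation as a nested dict, pass any variables separately, and read fields off the returned `GraphQLResult`. A minimal sketch of that convention, assuming an already-configured client; the project name used in the filter is illustrative only:

```python
# Sketch of the query-dict convention used throughout this client.
# The nested dict mirrors GraphQL syntax and is rendered to a query string
# by prefect.utilities.graphql.parse_graphql inside Client.graphql().
from prefect import Client
from prefect.utilities.graphql import with_args

client = Client()

# a plain query: select id and slug for every visible tenant
tenants = client.graphql({"query": {"tenant": {"id", "slug"}}}).data.tenant

# a filtered query built with with_args(), matching the style of register()
# ("example-project" is a made-up name for illustration)
query = {
    "query": {
        with_args("project", {"where": {"name": {"_eq": "example-project"}}}): {
            "id": True
        }
    }
}
projects = client.graphql(query).data.project  # list of matching projects
```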
def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id
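A hedged usage sketch of the `create_project` implementation above; the project name and description are invented for illustration, and the call assumes the client is already authenticated against a backend:

```python
# Hypothetical usage of Client.create_project (values are illustrative only)
from prefect import Client

client = Client()  # picks up api_server / api_token from config, as described above
project_id = client.create_project(
    project_name="example-project",
    project_description="created via the GraphQL client",
)
print(project_id)  # ID string returned by the create_project mutation
```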
869
900
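One detail of `Client._request` worth spelling out: when the API responds with an `API_ERROR` code, the client retries the request up to six times, sleeping `0.25 * 2 ** (retry_count - 1)` seconds after each failed attempt. A small sketch of the resulting wait schedule (pure arithmetic, no network calls):

```python
# Backoff schedule implied by the API_ERROR retry loop in Client._request
for retry_count in range(1, 7):
    wait = 0.25 * (2 ** (retry_count - 1))
    print(f"retry {retry_count}: sleep {wait:.2f}s")
# retry 1: 0.25s, retry 2: 0.50s, ..., retry 6: 8.00s
```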
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
get_flow_run_state
Retrieves the current state for a flow run.

Args:
    - flow_run_id (str): the id for this flow run

Returns:
    - State: a Prefect State object
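A minimal usage sketch for this method, assuming a Prefect backend is configured and reachable for the `Client`; the flow run ID shown is a placeholder, not a value from this record:

from prefect import Client

client = Client()  # api_server / api_token fall back to the Prefect config when omitted

# placeholder flow run ID -- substitute the ID of an existing flow run
state = client.get_flow_run_state("11111111-2222-3333-4444-555555555555")
print(state)  # a prefect.engine.state.State instance, e.g. Success, Running, or Failed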
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) # MASKED: get_flow_run_state function (lines 1072-1092) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. 
Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. 
- cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. 
Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
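The chunk above ends with the agent registration helpers (`register_agent` and `get_agent_config`). A minimal usage sketch follows, assuming a configured `Client` and a reachable backend API; the agent type, name, and labels are illustrative placeholders rather than values taken from this dataset.

```python
from prefect import Client

client = Client()

# Hypothetical values for illustration only
agent_id = client.register_agent(
    agent_type="LocalAgent",   # assumed agent type string
    name="example-agent",      # placeholder name
    labels=["dev"],            # placeholder labels
)
print(f"Registered agent with id: {agent_id}")
```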
def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State":
    """
    Retrieves the current state for a flow run.

    Args:
        - flow_run_id (str): the id for this flow run

    Returns:
        - State: a Prefect State object
    """
    query = {
        "query": {
            with_args("flow_run_by_pk", {"id": flow_run_id}): {
                "serialized_state": True,
            }
        }
    }
    flow_run = self.graphql(query).data.flow_run_by_pk
    return prefect.engine.state.State.deserialize(flow_run.serialized_state)
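As a quick illustration of the `get_flow_run_state` implementation above, the sketch below polls a flow run until it finishes; the flow run ID is a placeholder and the five-second interval is an arbitrary choice.

```python
import time

from prefect import Client

client = Client()
flow_run_id = "00000000-0000-0000-0000-000000000000"  # placeholder ID

# Poll the backend until the run reaches a finished state
while True:
    state = client.get_flow_run_state(flow_run_id)
    if state.is_finished():
        print(f"Flow run finished in state: {state}")
        break
    time.sleep(5)  # arbitrary polling interval
```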
1072
1092
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
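The file content above also defines the task tag concurrency helpers (`get_task_tag_limit`, `update_task_tag_limit`, `delete_task_tag_limit`). The sketch below shows one way to combine them, assuming tenant-admin permissions; the tag name and limit are placeholders.

```python
from prefect import Client

client = Client()
tag = "database"  # placeholder tag name

# Read the current limit for the tag (None if no limit is set)
current = client.get_task_tag_limit(tag)
print(f"Current limit for {tag!r}: {current}")

# Enforce a concurrency limit of 10 on the tag (requires tenant admin)
client.update_task_tag_limit(tag, limit=10)
```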
set_flow_run_state
Sets new state for a flow run in the database.

Args:
    - flow_run_id (str): the id of the flow run to set state for
    - state (State): the new state for this flow run
    - version (int, optional): the current version of the flow run state. This is
        optional but it can be supplied to enforce version-locking.

Returns:
    - State: the state the current flow run should be considered in

Raises:
    - ClientError: if the GraphQL mutation is bad for any reason
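A hedged sketch of calling `set_flow_run_state` as documented above; the flow run ID is a placeholder, and fetching the current version first via `get_flow_run_info` is one way to cooperate with version-locking.

```python
from prefect import Client
from prefect.engine.state import Failed

client = Client()
flow_run_id = "00000000-0000-0000-0000-000000000000"  # placeholder ID

# Fetch the current version so the update can be version-locked
info = client.get_flow_run_info(flow_run_id)

# Move the run into a terminal Failed state with an explanatory message
new_state = client.set_flow_run_state(
    flow_run_id=flow_run_id,
    state=Failed("Marked failed manually via the Client"),
    version=info.version,
)
print(new_state)
```

Note that, per the implementation, a QUEUED response from the backend is translated into a `Queued` state whose start time is derived from the configured queue interval, so callers should be prepared to receive a state other than the one they submitted.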
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) # MASKED: set_flow_run_state function (lines 1094-1150) def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. 
Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. 
- cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. 
Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
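For illustration, a minimal sketch of how the agent helpers defined above might be exercised; the agent type, name, labels, and configuration ID are hypothetical placeholders, and a reachable Prefect backend with a valid API token is assumed:

```python
from prefect import Client

client = Client()

# Register a hypothetical local agent with a single label; returns the new agent ID
agent_id = client.register_agent(
    agent_type="local", name="example-agent", labels=["dev"]
)

# Look up the settings stored for a known agent configuration (placeholder UUID)
settings = client.get_agent_config("00000000-0000-0000-0000-000000000000")
```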
def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state
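A minimal usage sketch for the completed method, assuming a reachable backend; the flow run ID is a placeholder and the choice of `Failed` is illustrative:

```python
from prefect import Client
from prefect.engine.state import Failed

client = Client()

# Move a placeholder flow run into a Failed state; `version` may also be passed
# to opt in to version-locking when the backend enforces it
new_state = client.set_flow_run_state(
    flow_run_id="00000000-0000-0000-0000-000000000000",
    state=Failed("Marked failed manually"),
)

# A Queued state may come back instead if the backend defers the transition
print(type(new_state).__name__)
```

Per the implementation above, the returned state is the state the run should now be considered in, which is a `Queued` state whenever the backend responds with a `QUEUED` status.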
1094
1150
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
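        # (a non-2xx response here is surfaced to the caller as requests.HTTPError)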
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
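The `get_agent_config` method that closes the file content above fetches an agent configuration's settings by ID. A minimal, hedged usage sketch follows; the configuration ID is a placeholder and printing the returned mapping is only an illustration of typical use, not part of the source:

```python
from prefect import Client

client = Client()

# Hypothetical agent configuration ID -- substitute a real one from your backend
settings = client.get_agent_config("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")

# `settings` is the configuration's settings mapping as returned by the API
print(settings)
```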
get_task_run_info
Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason
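To make the signature concrete, here is a short, hedged usage sketch of the method this docstring describes, assuming a configured Prefect backend; the flow run and task IDs below are placeholders:

```python
from prefect import Client

# Placeholder UUIDs -- replace with IDs from an actual flow run
flow_run_id = "00000000-0000-0000-0000-000000000000"
task_id = "11111111-1111-1111-1111-111111111111"

client = Client()

# For an unmapped task, map_index can be omitted (it is treated as -1 internally)
info = client.get_task_run_info(flow_run_id=flow_run_id, task_id=task_id)

# `info` is a TaskRunInfoResult NamedTuple: id, task_id, task_slug, version, state
print(info.id, info.task_slug, info.version, info.state)
```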
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states # MASKED: get_task_run_info function (lines 1192-1255) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. 
- cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. 
Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, )
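The implementation above follows a two-step pattern: a `get_or_create_task_run` mutation to obtain the task run ID, then a `task_run_by_pk` query to read its version and serialized state. As a follow-on illustration, a hedged sketch of calling it for a mapped task's child run and inspecting the deserialized state; the IDs are placeholders and the finished-state check is an assumption about typical usage, not part of the source:

```python
from prefect import Client

client = Client()

# Hypothetical identifiers for the third child of a mapped task (map_index=2)
info = client.get_task_run_info(
    flow_run_id="<flow-run-uuid>",
    task_id="<task-uuid>",
    map_index=2,
)

# `info.state` is a deserialized prefect.engine.state.State instance
if info.state.is_finished():
    print(f"Task run {info.id} (v{info.version}) finished in state {info.state}")
else:
    print(f"Task run {info.id} is still in state {info.state}")
```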
1192
1255
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
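The file content above defines the full Prefect `Client` GraphQL wrapper. Below is a minimal usage sketch tying together the run-related methods shown in that source (`create_flow_run`, `get_flow_run_info`, `get_flow_run_state`); it assumes a reachable Prefect backend and a configured API token, and the flow ID and parameter values are hypothetical placeholders, not values from this document.

```python
# Minimal usage sketch for the Client shown above (assumes a reachable
# Prefect backend and a valid auth token in the local Prefect config).
# The flow ID and parameters below are hypothetical placeholders.
from prefect import Client

client = Client()

# Schedule a run of an already-registered flow by its ID
flow_run_id = client.create_flow_run(
    flow_id="00000000-0000-0000-0000-000000000000",
    parameters={"n": 10},
    run_name="example-run",
)

# Inspect the run: full info (parameters, context, task runs) or just its state
info = client.get_flow_run_info(flow_run_id)
state = client.get_flow_run_state(flow_run_id)
print(info.name, state)
```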
get_task_run_state
Retrieves the current state for a task run. Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object
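The docstring above describes the masked method, and its body appears inline in the flattened file content earlier in this section. For readability, here is that same method restated with standard indentation: it queries `task_run_by_pk` for the serialized state and deserializes it, mirroring the `get_flow_run_state` pattern visible in the surrounding file.

```python
def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State":
    """
    Retrieves the current state for a task run.

    Args:
        - task_run_id (str): the id for this task run

    Returns:
        - State: a Prefect State object
    """
    # Query only the serialized state for this task run by primary key
    query = {
        "query": {
            with_args("task_run_by_pk", {"id": task_run_id}): {
                "serialized_state": True,
            }
        }
    }
    task_run = self.graphql(query).data.task_run_by_pk
    # Rehydrate the serialized state into a Prefect State object
    return prefect.engine.state.State.deserialize(task_run.serialized_state)
```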
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success # MASKED: get_task_run_state function (lines 1282-1302) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. 
This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. 
Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state)
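For context, a minimal usage sketch of the method implemented above, assuming a configured Prefect backend; the task run ID below is a placeholder, not a real identifier:

```python
# Illustrative sketch only: call Client.get_task_run_state against a backend.
# The UUID is a placeholder assumption for demonstration purposes.
from prefect import Client

client = Client()
state = client.get_task_run_state(
    task_run_id="00000000-0000-0000-0000-000000000000"
)
# `state` is a deserialized prefect.engine.state.State object
print(type(state).__name__, state.message)
```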
1282
1302
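The start_line and end_line values locate the masked implementation inside file_content. A small sketch of recovering that span, assuming the values are 1-indexed and inclusive and that file_content is available as a plain string:

```python
# Sketch: slice the masked span out of the full file using the row's
# start_line/end_line values (assumed 1-indexed, inclusive).
def extract_masked_span(file_content: str, start_line: int, end_line: int) -> str:
    lines = file_content.splitlines()
    return "\n".join(lines[start_line - 1 : end_line])

# e.g. extract_masked_span(file_content, 1282, 1302) should yield the
# get_task_run_state implementation shown above.
```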
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
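All of the Client methods above assemble their GraphQL payloads with `with_args` and send them through `Client.graphql`. The snippet below is a minimal sketch of that pattern, mirroring the `get_task_tag_limit` query shown above; the tag name "database" is a hypothetical example and a configured Prefect (0.x) backend is assumed.

# Minimal sketch of the with_args + Client.graphql pattern used by the methods
# above; the tag name "database" is a hypothetical example.
from prefect import Client
from prefect.utilities.graphql import with_args

client = Client()

query = {
    "query": {
        with_args("task_tag_limit", {"where": {"tag": {"_eq": "database"}}}): {
            "limit": True
        }
    }
}

result = client.graphql(query)
print(result.data.task_tag_limit)  # e.g. [] when no limit is set for the tag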
set_task_run_state
Sets new state for a task run.

Args:
    - task_run_id (str): the id of the task run to set state for
    - state (State): the new state for this task run
    - version (int, optional): the current version of the task run state. This is
        optional but it can be supplied to enforce version-locking.
    - cache_for (timedelta, optional): how long to store the result of this task for,
        using the serializer set in config; if not provided, no caching occurs

Raises:
    - ClientError: if the GraphQL mutation is bad for any reason

Returns:
    - State: the state the current task run should be considered in
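A minimal usage sketch of the call described by this docstring, assuming a configured Prefect (0.x) backend; the task run ID and version below are hypothetical placeholders, not values from the source.

# Hedged usage sketch for Client.set_task_run_state; the task_run_id and
# version values are hypothetical placeholders, not taken from the source.
from prefect import Client
from prefect.engine.state import Queued, Running

client = Client()

new_state = client.set_task_run_state(
    task_run_id="00000000-0000-0000-0000-000000000000",  # placeholder ID
    state=Running(message="worker picked up the task run"),
    version=3,  # current state version, supplied to opt in to version-locking
)

# The backend may refuse the transition and queue it instead; in that case the
# method returns a Queued state whose start_time tells the caller when to retry.
if isinstance(new_state, Queued):
    print(f"queued until {new_state.start_time}")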
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) # MASKED: set_task_run_state function (lines 1304-1362) def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. 
Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
def set_task_run_state(
    self,
    task_run_id: str,
    state: "prefect.engine.state.State",
    version: int = None,
    cache_for: datetime.timedelta = None,
) -> "prefect.engine.state.State":
    """
    Sets new state for a task run.

    Args:
        - task_run_id (str): the id of the task run to set state for
        - state (State): the new state for this task run
        - version (int, optional): the current version of the task run state. This is
            optional but it can be supplied to enforce version-locking.
        - cache_for (timedelta, optional): how long to store the result of this task for,
            using the serializer set in config; if not provided, no caching occurs

    Raises:
        - ClientError: if the GraphQL mutation is bad for any reason

    Returns:
        - State: the state the current task run should be considered in
    """
    mutation = {
        "mutation($input: set_task_run_states_input!)": {
            "set_task_run_states(input: $input)": {
                "states": {"id", "status", "message"}
            }
        }
    }

    serialized_state = state.serialize()

    result = self.graphql(
        mutation,
        variables=dict(
            input=dict(
                states=[
                    dict(
                        state=serialized_state,
                        task_run_id=task_run_id,
                        version=version,
                    )
                ]
            )
        ),
    )  # type: Any

    state_payload = result.data.set_task_run_states.states[0]

    if state_payload.status == "QUEUED":
        # If appropriate, the state attribute of the Queued state can be
        # set by the caller of this method
        return prefect.engine.state.Queued(
            message=state_payload.get("message"),
            start_time=pendulum.now("UTC").add(
                seconds=prefect.context.config.cloud.queue_interval
            ),
        )

    return state
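Because this implementation returns a Queued state whenever the backend reports a QUEUED status, a caller is expected to wait until the queued start_time before re-submitting. The loop below is one way that could look; the sleep-and-retry policy is an assumption for illustration, not the behavior of any particular Prefect runner.

# Illustrative retry loop around set_task_run_state; the polling policy is an
# assumption for illustration and is not taken from the Prefect source.
import time

import pendulum
from prefect import Client
from prefect.engine.state import Queued, Running

def submit_running_state(client: Client, task_run_id: str, version: int) -> None:
    state = client.set_task_run_state(
        task_run_id=task_run_id, state=Running(), version=version
    )
    while isinstance(state, Queued):
        # wait until the start_time chosen by the client (now + queue_interval)
        delay = (state.start_time - pendulum.now("UTC")).total_seconds()
        time.sleep(max(0.0, delay))
        state = client.set_task_run_state(
            task_run_id=task_run_id, state=Running(), version=version
        )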
1304
1362
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
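The tag concurrency-limit helpers above (`get_task_tag_limit`, `update_task_tag_limit`, `delete_task_tag_limit`) are thin wrappers over single GraphQL operations. A minimal usage sketch, assuming a reachable Prefect backend and tenant-admin permissions; the tag name "database" is a placeholder chosen for illustration:

```python
from prefect import Client

client = Client()

# Read the current concurrency limit for a tag ("database" is a placeholder tag name;
# get_task_tag_limit returns None when no limit has been set).
current = client.get_task_tag_limit("database")
print(f"current limit for 'database': {current}")

# Enforce at most 10 concurrent task runs for the tag; raises ValueError on failure.
client.update_task_tag_limit("database", limit=10)
```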
set_secret
Set a secret with the given name and value.

Args:
    - name (str): the name of the secret; used for retrieving the secret during task runs
    - value (Any): the value of the secret

Raises:
    - ClientError: if the GraphQL mutation is bad for any reason
    - ValueError: if the secret-setting was unsuccessful
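A minimal usage sketch for the method documented above, assuming a reachable Prefect backend that accepts the `set_secret` mutation; the secret name and value are placeholders:

```python
from prefect import Client

client = Client()

# "AWS_CREDENTIALS" is a placeholder name; the value may be any JSON-serializable
# object, not just a string.
client.set_secret(
    name="AWS_CREDENTIALS",
    value={"ACCESS_KEY": "abcdef", "SECRET_ACCESS_KEY": "ghijkl"},
)
# A ValueError is raised if the backend reports that setting the secret failed.
```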
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state # MASKED: set_secret function (lines 1364-1388) def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. 
Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
def set_secret(self, name: str, value: Any) -> None:
    """
    Set a secret with the given name and value.

    Args:
        - name (str): the name of the secret; used for retrieving the secret
            during task runs
        - value (Any): the value of the secret

    Raises:
        - ClientError: if the GraphQL mutation is bad for any reason
        - ValueError: if the secret-setting was unsuccessful
    """
    mutation = {
        "mutation($input: set_secret_input!)": {
            "set_secret(input: $input)": {"success"}
        }
    }

    result = self.graphql(
        mutation, variables=dict(input=dict(name=name, value=value))
    )  # type: Any

    if not result.data.set_secret.success:
        raise ValueError("Setting secret failed.")
1364
1388
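To make the masked `set_secret` implementation above easier to follow, here is a minimal usage sketch. The secret name and value are purely illustrative, and it assumes a `Client` that is already authenticated against Prefect Cloud.

```python
# Minimal sketch of calling Client.set_secret; the name and value are illustrative only
from prefect import Client

client = Client()  # assumes an API token is already configured
client.set_secret(name="MY_DB_PASSWORD", value="s3cr3t")
# raises ValueError("Setting secret failed.") if the mutation reports failure
```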
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
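The full file content above also documents `create_flow_run` and `get_cloud_url`; as a hedged sketch of how those two methods fit together, the snippet below schedules a run and prints its UI link. The flow ID and parameters are placeholders, not values from the source.

```python
# Illustrative only: the flow ID and parameters below are placeholders
from prefect import Client

client = Client()
flow_run_id = client.create_flow_run(
    flow_id="00000000-0000-0000-0000-000000000000",  # hypothetical flow ID
    parameters={"n": 10},
    run_name="example-run",
)
print(client.get_cloud_url("flow-run", flow_run_id))  # direct link to the run in the UI
```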
write_run_logs
Uploads a collection of logs to Cloud.

Args:
    - logs (List[Dict]): a list of log entries to add

Raises:
    - ValueError: if uploading the logs fails
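Based only on the docstring above, a minimal sketch of calling `write_run_logs` might look like the following; the exact keys required in each log entry are an assumption, since the docstring does not spell them out.

```python
# Assumed log-entry shape; the keys shown here are not guaranteed by the docstring
import pendulum
from prefect import Client

client = Client()
client.write_run_logs(
    [
        {
            "flow_run_id": "00000000-0000-0000-0000-000000000000",  # hypothetical run ID
            "timestamp": pendulum.now("UTC").isoformat(),
            "name": "example-logger",
            "message": "hello from a custom log entry",
            "level": "INFO",
        }
    ]
)  # raises ValueError if the upload fails
```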
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") # MASKED: write_run_logs function (lines 1466-1487) def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.")
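A minimal usage sketch for the implementation above, assuming an installed and configured Prefect backend. The method itself only requires a list of dicts; the specific log-entry keys used here (`flow_run_id`, `timestamp`, `name`, `message`, `level`) and the placeholder IDs are assumptions for illustration, not values taken from this record.

```python
# Hedged sketch: uploading two log entries for an existing flow run.
# The flow-run ID and the log-entry keys are hypothetical placeholders.
import pendulum
from prefect import Client

client = Client()
client.write_run_logs(
    [
        {
            "flow_run_id": "<flow-run-id>",  # hypothetical ID
            "timestamp": pendulum.now("UTC").isoformat(),
            "name": "prefect.example",
            "message": "Flow run started",
            "level": "INFO",
        },
        {
            "flow_run_id": "<flow-run-id>",  # hypothetical ID
            "timestamp": pendulum.now("UTC").isoformat(),
            "name": "prefect.example",
            "message": "Something went wrong",
            "level": "ERROR",
        },
    ]
)
# Per the implementation above, a ValueError("Writing logs failed.") is raised
# if the write_run_logs mutation does not report success.
```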
1466
1487
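For broader context, a hedged sketch combining other Client methods documented in this record (`create_flow_run` and `get_flow_run_state`); the flow ID and parameters below are placeholders, not values from this file.

```python
# Hedged sketch: schedule a flow run, then read back its current state.
# "<flow-id>" and the parameters dict are hypothetical placeholders.
from prefect import Client

client = Client()

flow_run_id = client.create_flow_run(
    flow_id="<flow-id>",
    parameters={"example_param": 1},
    run_name="example-run",
)

state = client.get_flow_run_state(flow_run_id)
print(f"Flow run {flow_run_id} is in state {state}")
```

Both calls go through the same `graphql` convenience method shown earlier in this record, so a `ClientError` is raised if the backend returns GraphQL errors.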
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
register_agent
Register an agent with a backend API

Args:
    - agent_type (str): The type of agent being registered
    - name (str, optional): The name of the agent being registered
    - labels (List[str], optional): A list of any present labels on the agent being registered
    - agent_config_id (str, optional): The ID of an agent configuration to register with

Returns:
    - The agent ID as a string
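For orientation, a minimal usage sketch of the method this docstring describes follows. The agent type, name, and labels are placeholder values, and a reachable Prefect backend with a valid API token (picked up from the Prefect config) is assumed:

```python
# Hypothetical usage sketch (placeholder values; assumes a configured backend
# and a valid API token available to the Client).
from prefect import Client

client = Client()
agent_id = client.register_agent(
    agent_type="kubernetes",   # required: the type of agent being registered
    name="prod-k8s-agent",     # optional display name (placeholder)
    labels=["prod", "k8s"],    # optional labels attached to the agent
)
print(agent_id)  # the backend-assigned agent ID, returned as a string
```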
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") # MASKED: register_agent function (lines 1489-1531) def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
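The tag-limit methods shown here (`get_task_tag_limit`, `update_task_tag_limit`, `delete_task_tag_limit`) are thin wrappers over single GraphQL operations. A minimal usage sketch, assuming a reachable backend and tenant-admin permissions; the tag name and limit value are placeholders:

```python
from prefect import Client

client = Client()  # server and token resolved from the Prefect config

# Cap concurrently running tasks tagged "database" at 10.
client.update_task_tag_limit(tag="database", limit=10)

# Read the limit back; returns None if no limit is set for the tag.
assert client.get_task_tag_limit("database") == 10
```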
def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id
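The `register_agent` implementation above issues a single `register_agent` mutation and raises `ValueError` when no agent ID comes back. A minimal usage sketch, assuming an authenticated `Client` against a live backend; the agent type, name, and labels are illustrative:

```python
from prefect import Client

client = Client(api_token="<API_TOKEN>")  # placeholder token
agent_id = client.register_agent(
    agent_type="local",
    name="example-agent",  # hypothetical name
    labels=["dev"],        # hypothetical label
)
print("registered agent", agent_id)
```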
1489
1531
import datetime import json import os import re import time import uuid import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union from urllib.parse import urljoin # if simplejson is installed, `requests` defaults to using it instead of json # this allows the client to gracefully handle either json or simplejson try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError import pendulum import toml from slugify import slugify import prefect from prefect.utilities.exceptions import ( AuthorizationError, ClientError, VersionLockError, ) from prefect.utilities.graphql import ( EnumValue, GraphQLResult, compress, parse_graphql, with_args, ) from prefect.utilities.logging import create_diagnostic_logger if TYPE_CHECKING: from prefect.core import Flow import requests JSONLike = Union[bool, dict, list, str, int, float, None] # type definitions for GraphQL results TaskRunInfoResult = NamedTuple( "TaskRunInfoResult", [ ("id", str), ("task_id", str), ("task_slug", str), ("version", int), ("state", "prefect.engine.state.State"), ], ) FlowRunInfoResult = NamedTuple( "FlowRunInfoResult", [ ("id", str), ("name", str), ("flow_id", str), ("parameters", Dict[str, Any]), ("context", Dict[str, Any]), ("version", int), ("scheduled_start_time", datetime.datetime), ("state", "prefect.engine.state.State"), ("task_runs", List[TaskRunInfoResult]), ], ) class Client: """ Client for communication with Prefect Cloud If the arguments aren't specified the client initialization first checks the prefect configuration and if the server is not set there it checks the current context. The token will only be present in the current context. Args: - api_server (str, optional): the URL to send all GraphQL requests to; if not provided, will be pulled from `cloud.graphql` config var - api_token (str, optional): a Prefect Cloud API token, taken from `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may be used to log in to any tenant that the user is a member of. In that case, ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself will be used as authorization. 
""" def __init__(self, api_server: str = None, api_token: str = None): self._access_token = None self._refresh_token = None self._access_token_expires_at = pendulum.now() self._active_tenant_id = None self._attached_headers = {} # type: Dict[str, str] self.logger = create_diagnostic_logger("Diagnostics") # store api server self.api_server = api_server or prefect.context.config.cloud.get("graphql") # store api token self._api_token = api_token or prefect.context.config.cloud.get( "auth_token", None ) if prefect.config.backend == "cloud": if not self._api_token: # if no api token was passed, attempt to load state from local storage settings = self._load_local_settings() self._api_token = settings.get("api_token") if self._api_token: self._active_tenant_id = settings.get("active_tenant_id") if self._active_tenant_id: try: self.login_to_tenant(tenant_id=self._active_tenant_id) except AuthorizationError: # if an authorization error is raised, then the token is invalid and should # be cleared self.logout_from_tenant() else: # TODO: Separate put this functionality and clean up initial tenant access handling if not self._active_tenant_id: tenant_info = self.graphql({"query": {"tenant": {"id"}}}) if tenant_info.data.tenant: self._active_tenant_id = tenant_info.data.tenant[0].id def create_tenant(self, name: str, slug: str = None) -> str: """ Creates a new tenant. Note this route only works when run against Prefect Server. Args: - name (str): the name of the tenant to create - slug (str, optional): the slug of the tenant to create; defaults to name Returns: - str: the ID of the newly created tenant, or the ID of the currently active tenant Raises: - ValueError: if run against Prefect Cloud """ if prefect.config.backend != "server": msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/" raise ValueError(msg) if slug is None: slug = slugify(name) tenant_info = self.graphql( { "mutation($input: create_tenant_input!)": { "create_tenant(input: $input)": {"id"} } }, variables=dict(input=dict(name=name, slug=slug)), ) return tenant_info.data.create_tenant.id # ------------------------------------------------------------------------- # Utilities def get( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and GET request Args: - path (str): the path of the API url. For example, to GET http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the GET request to; defaults to `self.api_server` - headers (dict, optional): Headers to pass with the request - params (dict): GET parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="GET", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def post( self, path: str, server: str = None, headers: dict = None, params: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> dict: """ Convenience function for calling the Prefect API with token auth and POST request Args: - path (str): the path of the API url. 
For example, to POST http://prefect-server/v1/auth/login, path would be 'auth/login'. - server (str, optional): the server to send the POST request to; defaults to `self.api_server` - headers(dict): headers to pass with the request - params (dict): POST parameters - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Dictionary representation of the request made """ response = self._request( method="POST", path=path, params=params, server=server, headers=headers, token=token, retry_on_api_error=retry_on_api_error, ) if response.text: return response.json() else: return {} def graphql( self, query: Any, raise_on_error: bool = True, headers: Dict[str, str] = None, variables: Dict[str, JSONLike] = None, token: str = None, retry_on_api_error: bool = True, ) -> GraphQLResult: """ Convenience function for running queries against the Prefect GraphQL API Args: - query (Any): A representation of a graphql query to be executed. It will be parsed by prefect.utilities.graphql.parse_graphql(). - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL returns any `errors`. - headers (dict): any additional headers that should be passed as part of the request - variables (dict): Variables to be filled into a query with the key being equivalent to the variables that are accepted by the query - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - dict: Data returned from the GraphQL query Raises: - ClientError if there are errors raised by the GraphQL mutation """ result = self.post( path="", server=self.api_server, headers=headers, params=dict(query=parse_graphql(query), variables=json.dumps(variables)), token=token, retry_on_api_error=retry_on_api_error, ) if raise_on_error and "errors" in result: if "UNAUTHENTICATED" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif "Malformed Authorization header" in str(result["errors"]): raise AuthorizationError(result["errors"]) elif ( result["errors"][0].get("extensions", {}).get("code") == "VERSION_LOCKING_ERROR" ): raise VersionLockError(result["errors"]) raise ClientError(result["errors"]) else: return GraphQLResult(result) # type: ignore def _send_request( self, session: "requests.Session", method: str, url: str, params: Dict[str, JSONLike] = None, headers: dict = None, ) -> "requests.models.Response": if prefect.context.config.cloud.get("diagnostics") is True: self.logger.debug(f"Preparing request to {url}") clean_headers = { head: re.sub("Bearer .*", "Bearer XXXX", val) for head, val in headers.items() # type: ignore } self.logger.debug(f"Headers: {clean_headers}") self.logger.debug(f"Request: {params}") start_time = time.time() if method == "GET": response = session.get(url, headers=headers, params=params, timeout=30) elif method == "POST": response = session.post(url, headers=headers, json=params, timeout=30) elif method == "DELETE": response = session.delete(url, headers=headers, timeout=30) else: raise ValueError("Invalid method: {}".format(method)) if prefect.context.config.cloud.get("diagnostics") is True: end_time = time.time() self.logger.debug(f"Response: {response.json()}") self.logger.debug( f"Request duration: {round(end_time - start_time, 4)} seconds" ) # Check if request returned a successful status 
response.raise_for_status() return response def _request( self, method: str, path: str, params: Dict[str, JSONLike] = None, server: str = None, headers: dict = None, token: str = None, retry_on_api_error: bool = True, ) -> "requests.models.Response": """ Runs any specified request (GET, POST, DELETE) against the server Args: - method (str): The type of request to be made (GET, POST, DELETE) - path (str): Path of the API URL - params (dict, optional): Parameters used for the request - server (str, optional): The server to make requests against, base API server is used if not specified - headers (dict, optional): Headers to pass with the request - token (str): an auth token. If not supplied, the `client.access_token` is used. - retry_on_api_error (bool): whether the operation should be retried if the API returns an API_ERROR code Returns: - requests.models.Response: The response returned from the request Raises: - ClientError: if the client token is not in the context (due to not being logged in) - ValueError: if a method is specified outside of the accepted GET, POST, DELETE - requests.HTTPError: if a status code is returned that is not `200` or `401` """ if server is None: server = self.api_server assert isinstance(server, str) # mypy assert if token is None: token = self.get_auth_token() # 'import requests' is expensive time-wise, we should do this just-in-time to keep # the 'import prefect' time low import requests url = urljoin(server, path.lstrip("/")).rstrip("/") params = params or {} headers = headers or {} if token: headers["Authorization"] = "Bearer {}".format(token) headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__) if self._attached_headers: headers.update(self._attached_headers) session = requests.Session() retry_total = 6 if prefect.config.backend == "cloud" else 1 retries = requests.packages.urllib3.util.retry.Retry( total=retry_total, backoff_factor=1, status_forcelist=[500, 502, 503, 504], method_whitelist=["DELETE", "GET", "POST"], ) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) response = self._send_request( session=session, method=method, url=url, params=params, headers=headers ) # parse the response try: json_resp = response.json() except JSONDecodeError as exc: if prefect.config.backend == "cloud" and "Authorization" not in headers: raise ClientError( "Malformed response received from Cloud - please ensure that you " "have an API token properly configured." ) from exc else: raise ClientError("Malformed response received from API.") from exc # check if there was an API_ERROR code in the response if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error: success, retry_count = False, 0 # retry up to six times while success is False and retry_count < 6: response = self._send_request( session=session, method=method, url=url, params=params, headers=headers, ) if "API_ERROR" in str(response.json().get("errors")): retry_count += 1 time.sleep(0.25 * (2 ** (retry_count - 1))) else: success = True return response def attach_headers(self, headers: dict) -> None: """ Set headers to be attached to this Client Args: - headers (dict): A dictionary of headers to attach to this client. These headers get added on to the existing dictionary of headers. 
""" self._attached_headers.update(headers) # ------------------------------------------------------------------------- # Auth # ------------------------------------------------------------------------- @property def _local_settings_path(self) -> Path: """ Returns the local settings directory corresponding to the current API servers """ path = "{home}/client/{server}".format( home=prefect.context.config.home_dir, server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"), ) return Path(os.path.expanduser(path)) / "settings.toml" def _save_local_settings(self, settings: dict) -> None: """ Writes settings to local storage """ self._local_settings_path.parent.mkdir(exist_ok=True, parents=True) with self._local_settings_path.open("w+") as f: toml.dump(settings, f) def _load_local_settings(self) -> dict: """ Loads settings from local storage """ if self._local_settings_path.exists(): with self._local_settings_path.open("r") as f: return toml.load(f) # type: ignore return {} def save_api_token(self) -> None: """ Saves the API token in local storage. """ settings = self._load_local_settings() settings["api_token"] = self._api_token self._save_local_settings(settings) def get_auth_token(self) -> str: """ Returns an auth token: - if no explicit access token is stored, returns the api token - if there is an access token: - if there's a refresh token and the access token expires in the next 30 seconds, then we refresh the access token and store the result - return the access token Returns: - str: the access token """ if not self._access_token: return self._api_token expiration = self._access_token_expires_at or pendulum.now() if self._refresh_token and pendulum.now().add(seconds=30) > expiration: self._refresh_access_token() return self._access_token def get_available_tenants(self) -> List[Dict]: """ Returns a list of available tenants. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - List[Dict]: a list of dictionaries containing the id, slug, and name of available tenants """ result = self.graphql( {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}}, # use the API token to see all available tenants token=self._api_token, ) # type: ignore return result.data.tenant # type: ignore def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool: """ Log in to a specific tenant NOTE: this should only be called by users who have provided a USER-scoped API token. Args: - tenant_slug (str): the tenant's slug - tenant_id (str): the tenant's id Returns: - bool: True if the login was successful Raises: - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided - ValueError: if the `tenant_id` is not a valid UUID - ValueError: if no matching tenants are found """ if tenant_slug is None and tenant_id is None: raise ValueError( "At least one of `tenant_slug` or `tenant_id` must be provided." 
) elif tenant_id: try: uuid.UUID(tenant_id) except ValueError as exc: raise ValueError("The `tenant_id` must be a valid UUID.") from exc tenant = self.graphql( { "query($slug: String, $id: uuid)": { "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"} } }, variables=dict(slug=tenant_slug, id=tenant_id), # use the API token to query the tenant token=self._api_token, ) # type: ignore if not tenant.data.tenant: # type: ignore raise ValueError("No matching tenants found.") tenant_id = tenant.data.tenant[0].id # type: ignore if prefect.config.backend == "cloud": payload = self.graphql( { "mutation($input: switch_tenant_input!)": { "switch_tenant(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(tenant_id=tenant_id)), # Use the API token to switch tenants token=self._api_token, ) # type: ignore self._access_token = payload.data.switch_tenant.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.switch_tenant.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore self._active_tenant_id = tenant_id # save the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = self._active_tenant_id self._save_local_settings(settings) return True def logout_from_tenant(self) -> None: self._access_token = None self._refresh_token = None self._active_tenant_id = None # remove the tenant setting settings = self._load_local_settings() settings["active_tenant_id"] = None self._save_local_settings(settings) def _refresh_access_token(self) -> bool: """ Refresh the client's JWT access token. NOTE: this should only be called by users who have provided a USER-scoped API token. Returns: - bool: True if the refresh succeeds """ payload = self.graphql( { "mutation($input: refresh_token_input!)": { "refresh_token(input: $input)": { "access_token", "expires_at", "refresh_token", } } }, variables=dict(input=dict(access_token=self._access_token)), # pass the refresh token as the auth header token=self._refresh_token, ) # type: ignore self._access_token = payload.data.refresh_token.access_token # type: ignore self._access_token_expires_at = pendulum.parse( # type: ignore payload.data.refresh_token.expires_at # type: ignore ) # type: ignore self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore return True # ------------------------------------------------------------------------- # Actions # ------------------------------------------------------------------------- def register( self, flow: "Flow", project_name: str = None, build: bool = True, set_schedule_active: bool = True, version_group_id: str = None, compressed: bool = True, no_url: bool = False, ) -> str: """ Push a new flow to Prefect Cloud Args: - flow (Flow): a flow to register - project_name (str, optional): the project that should contain this flow. - build (bool, optional): if `True`, the flow's environment is built prior to serialization; defaults to `True` - set_schedule_active (bool, optional): if `False`, will set the schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule). Defaults to `True`. This can be changed later. - version_group_id (str, optional): the UUID version group ID to use for versioning this Flow in Cloud; if not provided, the version group ID associated with this Flow's project and name will be used. 
- compressed (bool, optional): if `True`, the serialized flow will be; defaults to `True` compressed - no_url (bool, optional): if `True`, the stdout from this function will not contain the URL link to the newly-registered flow in the Cloud UI Returns: - str: the ID of the newly-registered flow Raises: - ClientError: if the register failed """ required_parameters = {p for p in flow.parameters() if p.required} if flow.schedule is not None and required_parameters: required_names = {p.name for p in required_parameters} if not all( [ required_names <= set(c.parameter_defaults.keys()) for c in flow.schedule.clocks ] ): raise ClientError( "Flows with required parameters can not be scheduled automatically." ) if any(e.key for e in flow.edges) and flow.result is None: warnings.warn( "No result handler was specified on your Flow. Cloud features such as " "input caching and resuming task runs from failure may not work properly.", stacklevel=2, ) if compressed: create_mutation = { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } } else: create_mutation = { "mutation($input: create_flow_input!)": { "create_flow(input: $input)": {"id"} } } project = None if project_name is None: raise TypeError( "'project_name' is a required field when registering a flow." ) query_project = { "query": { with_args("project", {"where": {"name": {"_eq": project_name}}}): { "id": True } } } project = self.graphql(query_project).data.project # type: ignore if not project: raise ValueError( "Project {} not found. Run `prefect create project '{}'` to create it.".format( project_name, project_name ) ) serialized_flow = flow.serialize(build=build) # type: Any # Set Docker storage image in environment metadata if provided if isinstance(flow.storage, prefect.environments.storage.Docker): flow.environment.metadata["image"] = flow.storage.name serialized_flow = flow.serialize(build=False) # If no image ever set, default metadata to all_extras image on current version if not flow.environment.metadata.get("image"): version = prefect.__version__.split("+")[0] flow.environment.metadata[ "image" ] = f"prefecthq/prefect:all_extras-{version}" serialized_flow = flow.serialize(build=False) # verify that the serialized flow can be deserialized try: prefect.serialization.flow.FlowSchema().load(serialized_flow) except Exception as exc: raise ValueError( "Flow could not be deserialized successfully. Error was: {}".format( repr(exc) ) ) from exc if compressed: serialized_flow = compress(serialized_flow) res = self.graphql( create_mutation, variables=dict( input=dict( project_id=(project[0].id if project else None), serialized_flow=serialized_flow, set_schedule_active=set_schedule_active, version_group_id=version_group_id, ) ), retry_on_api_error=False, ) # type: Any flow_id = ( res.data.create_flow_from_compressed_string.id if compressed else res.data.create_flow.id ) if not no_url: # Generate direct link to Cloud flow flow_url = self.get_cloud_url("flow", flow_id) prefix = "└── " print("Flow URL: {}".format(flow_url)) # Extra information to improve visibility msg = ( f" {prefix}ID: {flow_id}\n" f" {prefix}Project: {project_name}\n" f" {prefix}Labels: {list(flow.environment.labels)}" ) print(msg) return flow_id def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str: """ Convenience method for creating Prefect Cloud URLs for a given subdirectory. 
Args: - subdirectory (str): the subdirectory to use (e.g., `"flow-run"`) - id (str): the ID of the page - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID Example: ```python from prefect import Client client = Client() client.get_cloud_url("flow-run", "424242-ca-94611-111-55") # returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55" ``` """ # Generate direct link to UI if prefect.config.backend == "cloud": tenant_slug = self.get_default_tenant_slug(as_user=as_user) else: tenant_slug = "" base_url = ( re.sub("api-", "", prefect.config.cloud.api) if re.search("api-", prefect.config.cloud.api) else re.sub("api", "cloud", prefect.config.cloud.api) ) full_url = prefect.config.cloud.api if tenant_slug: full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id]) elif prefect.config.backend == "server": full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id]) return full_url def get_default_tenant_slug(self, as_user: bool = True) -> str: """ Get the default tenant slug for the currently authenticated user Args: - as_user (bool, optional): whether this query is being made from a USER scoped token; defaults to `True`. Only used internally for queries made from RUNNERs Returns: - str: the slug of the current default tenant for this user """ if as_user: query = { "query": {"user": {"default_membership": {"tenant": "slug"}}} } # type: dict else: query = {"query": {"tenant": {"slug"}}} res = self.graphql(query) if as_user: user = res.get("data").user[0] slug = user.default_membership.tenant.slug else: slug = res.get("data").tenant[0].slug return slug def create_project(self, project_name: str, project_description: str = None) -> str: """ Create a new Project Args: - project_name (str): the project that should contain this flow - project_description (str, optional): the project description Returns: - str: the ID of the newly-created project Raises: - ClientError: if the project creation failed """ project_mutation = { "mutation($input: create_project_input!)": { "create_project(input: $input)": {"id"} } } res = self.graphql( project_mutation, variables=dict( input=dict( name=project_name, description=project_description, tenant_id=self._active_tenant_id, ) ), ) # type: Any return res.data.create_project.id def create_flow_run( self, flow_id: str = None, context: dict = None, parameters: dict = None, scheduled_start_time: datetime.datetime = None, idempotency_key: str = None, run_name: str = None, version_group_id: str = None, ) -> str: """ Create a new flow run for the given flow id. If `start_time` is not provided, the flow run will be scheduled to start immediately. If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used. Args: - flow_id (str, optional): the id of the Flow you wish to schedule - context (dict, optional): the run context - parameters (dict, optional): a dictionary of parameter values to pass to the flow run - scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now - idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24 hours. Any subsequent attempts to create a run with the same idempotency key will return the ID of the originally created run (no new run will be created after the first). 
An error will be raised if parameters or context are provided and don't match the original. Each subsequent request will reset the TTL for 24 hours. - run_name (str, optional): The name assigned to this flow run - version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled to run. This input can be used as a stable API for running flows which are regularly updated. Returns: - str: the ID of the newly-created flow run Raises: - ClientError: if the GraphQL query is bad for any reason """ create_mutation = { "mutation($input: create_flow_run_input!)": { "create_flow_run(input: $input)": {"id": True} } } if not flow_id and not version_group_id: raise ValueError("One of flow_id or version_group_id must be provided") if flow_id: inputs = dict(flow_id=flow_id) else: inputs = dict(version_group_id=version_group_id) # type: ignore if parameters is not None: inputs.update(parameters=parameters) # type: ignore if context is not None: inputs.update(context=context) # type: ignore if idempotency_key is not None: inputs.update(idempotency_key=idempotency_key) # type: ignore if scheduled_start_time is not None: inputs.update( scheduled_start_time=scheduled_start_time.isoformat() ) # type: ignore if run_name is not None: inputs.update(flow_run_name=run_name) # type: ignore res = self.graphql(create_mutation, variables=dict(input=inputs)) return res.data.create_flow_run.id # type: ignore def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult: """ Retrieves version and current state information for the given flow run. Args: - flow_run_id (str): the id of the flow run to get information for Returns: - GraphQLResult: an object representing information about the flow run Raises: - ClientError: if the GraphQL mutation is bad for any reason """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "id": True, "name": True, "flow_id": True, "parameters": True, "context": True, "version": True, "scheduled_start_time": True, "serialized_state": True, # load all task runs except dynamic task runs with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): { "id": True, "task": {"id": True, "slug": True}, "version": True, "serialized_state": True, }, } } } result = self.graphql(query).data.flow_run_by_pk # type: ignore if result is None: raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id)) # convert scheduled_start_time from string to datetime result.scheduled_start_time = pendulum.parse(result.scheduled_start_time) # create "state" attribute from serialized_state result.state = prefect.engine.state.State.deserialize( result.pop("serialized_state") ) # reformat task_runs task_runs = [] for tr in result.task_runs: tr.state = prefect.engine.state.State.deserialize( tr.pop("serialized_state") ) task_info = tr.pop("task") tr.task_id = task_info["id"] tr.task_slug = task_info["slug"] task_runs.append(TaskRunInfoResult(**tr)) result.task_runs = task_runs result.context = ( result.context.to_dict() if result.context is not None else None ) result.parameters = ( result.parameters.to_dict() if result.parameters is not None else None ) return FlowRunInfoResult(**result) def update_flow_run_heartbeat(self, flow_run_id: str) -> None: """ Convenience method for heartbeating a flow run. Does NOT raise an error if the update fails. 
Args: - flow_run_id (str): the flow run ID to heartbeat """ mutation = { "mutation": { with_args( "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def update_task_run_heartbeat(self, task_run_id: str) -> None: """ Convenience method for heartbeating a task run. Does NOT raise an error if the update fails. Args: - task_run_id (str): the task run ID to heartbeat """ mutation = { "mutation": { with_args( "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}} ): {"success"} } } self.graphql(mutation, raise_on_error=True) def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a flow run. Args: - flow_run_id (str): the id for this flow run Returns: - State: a Prefect State object """ query = { "query": { with_args("flow_run_by_pk", {"id": flow_run_id}): { "serialized_state": True, } } } flow_run = self.graphql(query).data.flow_run_by_pk return prefect.engine.state.State.deserialize(flow_run.serialized_state) def set_flow_run_state( self, flow_run_id: str, state: "prefect.engine.state.State", version: int = None, ) -> "prefect.engine.state.State": """ Sets new state for a flow run in the database. Args: - flow_run_id (str): the id of the flow run to set state for - state (State): the new state for this flow run - version (int, optional): the current version of the flow run state. This is optional but it can be supplied to enforce version-locking. Returns: - State: the state the current flow run should be considered in Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation($input: set_flow_run_states_input!)": { "set_flow_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, flow_run_id=flow_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_flow_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def get_latest_cached_states( self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime ) -> List["prefect.engine.state.State"]: """ Pulls all Cached states for the given task that were created after the provided date. 
Args: - task_id (str): the task id for this task run - cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used - created_after (datetime.datetime): the earliest date the state should have been created at Returns: - List[State]: a list of Cached states created after the given date """ args = { "where": { "state": {"_eq": "Cached"}, "state_timestamp": {"_gte": created_after.isoformat()}, }, "order_by": {"state_timestamp": EnumValue("desc")}, "limit": 100, } # type: Dict[str, Any] # if a cache key was provided, match it against all tasks if cache_key is not None: args["where"].update({"cache_key": {"_eq": cache_key}}) # otherwise match against only this task, across all cache keys else: args["where"].update({"task_id": {"_eq": task_id}}) query = {"query": {with_args("task_run", args): "serialized_state"}} result = self.graphql(query) # type: Any deserializer = prefect.engine.state.State.deserialize valid_states = [ deserializer(res.serialized_state) for res in result.data.task_run ] return valid_states def get_task_run_info( self, flow_run_id: str, task_id: str, map_index: Optional[int] = None ) -> TaskRunInfoResult: """ Retrieves version and current state information for the given task run. Args: - flow_run_id (str): the id of the flow run that this task run lives in - task_id (str): the task id for this task run - map_index (int, optional): the mapping index for this task run; if `None`, it is assumed this task is _not_ mapped Returns: - NamedTuple: a tuple containing `id, task_id, version, state` Raises: - ClientError: if the GraphQL mutation is bad for any reason """ mutation = { "mutation": { with_args( "get_or_create_task_run", { "input": { "flow_run_id": flow_run_id, "task_id": task_id, "map_index": -1 if map_index is None else map_index, } }, ): { "id": True, } } } result = self.graphql(mutation) # type: Any if result is None: raise ClientError("Failed to create task run.") task_run_id = result.data.get_or_create_task_run.id query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "version": True, "serialized_state": True, "task": {"slug": True}, } } } task_run = self.graphql(query).data.task_run_by_pk # type: ignore if task_run is None: raise ClientError('Task run ID not found: "{}"'.format(task_run_id)) state = prefect.engine.state.State.deserialize(task_run.serialized_state) return TaskRunInfoResult( id=task_run_id, task_id=task_id, task_slug=task_run.task.slug, version=task_run.version, state=state, ) def set_task_run_name(self, task_run_id: str, name: str) -> bool: """ Set the name of a task run Args: - task_run_id (str): the id of a task run - name (str): a name for this task run Returns: - bool: whether or not the task run name was updated """ mutation = { "mutation($input: set_task_run_name_input!)": { "set_task_run_name(input: $input)": { "success": True, } } } result = self.graphql( mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name)) ) return result.data.set_task_run_name.success def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State": """ Retrieves the current state for a task run. 
Args: - task_run_id (str): the id for this task run Returns: - State: a Prefect State object """ query = { "query": { with_args("task_run_by_pk", {"id": task_run_id}): { "serialized_state": True, } } } task_run = self.graphql(query).data.task_run_by_pk return prefect.engine.state.State.deserialize(task_run.serialized_state) def set_task_run_state( self, task_run_id: str, state: "prefect.engine.state.State", version: int = None, cache_for: datetime.timedelta = None, ) -> "prefect.engine.state.State": """ Sets new state for a task run. Args: - task_run_id (str): the id of the task run to set state for - state (State): the new state for this task run - version (int, optional): the current version of the task run state. This is optional but it can be supplied to enforce version-locking. - cache_for (timedelta, optional): how long to store the result of this task for, using the serializer set in config; if not provided, no caching occurs Raises: - ClientError: if the GraphQL mutation is bad for any reason Returns: - State: the state the current task run should be considered in """ mutation = { "mutation($input: set_task_run_states_input!)": { "set_task_run_states(input: $input)": { "states": {"id", "status", "message"} } } } serialized_state = state.serialize() result = self.graphql( mutation, variables=dict( input=dict( states=[ dict( state=serialized_state, task_run_id=task_run_id, version=version, ) ] ) ), ) # type: Any state_payload = result.data.set_task_run_states.states[0] if state_payload.status == "QUEUED": # If appropriate, the state attribute of the Queued state can be # set by the caller of this method return prefect.engine.state.Queued( message=state_payload.get("message"), start_time=pendulum.now("UTC").add( seconds=prefect.context.config.cloud.queue_interval ), ) return state def set_secret(self, name: str, value: Any) -> None: """ Set a secret with the given name and value. Args: - name (str): the name of the secret; used for retrieving the secret during task runs - value (Any): the value of the secret Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the secret-setting was unsuccessful """ mutation = { "mutation($input: set_secret_input!)": { "set_secret(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(name=name, value=value)) ) # type: Any if not result.data.set_secret.success: raise ValueError("Setting secret failed.") def get_task_tag_limit(self, tag: str) -> Optional[int]: """ Retrieve the current task tag concurrency limit for a given tag. Args: - tag (str): the tag to update Raises: - ClientError: if the GraphQL query fails """ query = { "query": { with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): { "limit": True } } } result = self.graphql(query) # type: Any if result.data.task_tag_limit: return result.data.task_tag_limit[0].limit else: return None def update_task_tag_limit(self, tag: str, limit: int) -> None: """ Update the task tag concurrency limit for a given tag; requires tenant admin permissions. 
Args: - tag (str): the tag to update - limit (int): the concurrency limit to enforce on the tag; should be a value >= 0 Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided """ if limit < 0: raise ValueError("Concurrency limits must be >= 0") mutation = { "mutation($input: update_task_tag_limit_input!)": { "update_task_tag_limit(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict(input=dict(tag=tag, limit=limit)) ) # type: Any if not result.data.update_task_tag_limit.id: raise ValueError("Updating the task tag concurrency limit failed.") def delete_task_tag_limit(self, limit_id: str) -> None: """ Deletes a given task tag concurrency limit; requires tenant admin permissions. Args: - limit_id (str): the ID of the tag to delete Raises: - ClientError: if the GraphQL mutation is bad for any reason - ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided """ mutation = { "mutation($input: delete_task_tag_limit_input!)": { "delete_task_tag_limit(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(limit_id=limit_id)) ) # type: Any if not result.data.delete_task_tag_limit.success: raise ValueError("Deleting the task tag concurrency limit failed.") def write_run_logs(self, logs: List[Dict]) -> None: """ Uploads a collection of logs to Cloud. Args: - logs (List[Dict]): a list of log entries to add Raises: - ValueError: if uploading the logs fail """ mutation = { "mutation($input: write_run_logs_input!)": { "write_run_logs(input: $input)": {"success"} } } result = self.graphql( mutation, variables=dict(input=dict(logs=logs)) ) # type: Any if not result.data.write_run_logs.success: raise ValueError("Writing logs failed.") def register_agent( self, agent_type: str, name: str = None, labels: List[str] = None, agent_config_id: str = None, ) -> str: """ Register an agent with a backend API Args: - agent_type (str): The type of agent being registered - name: (str, optional): The name of the agent being registered - labels (List[str], optional): A list of any present labels on the agent being registered - agent_config_id (str, optional): The ID of an agent configuration to register with Returns: - The agent ID as a string """ mutation = { "mutation($input: register_agent_input!)": { "register_agent(input: $input)": {"id"} } } result = self.graphql( mutation, variables=dict( input=dict( type=agent_type, name=name, labels=labels or [], tenant_id=self._active_tenant_id, agent_config_id=agent_config_id, ) ), ) if not result.data.register_agent.id: raise ValueError("Error registering agent") return result.data.register_agent.id def get_agent_config(self, agent_config_id: str) -> dict: """ Get agent config settings Args: - agent_config_id (str): The ID of an agent configuration to retrieve Returns: - dict: the agent configuration's `settings` """ query = { "query": { with_args( "agent_config", {"where": {"id": {"_eq": agent_config_id}}} ): {"settings": True} } } result = self.graphql(query) # type: Any return result.data.agent_config[0].settings
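Every method in this file expresses its GraphQL operation as a nested Python dict and lets `parse_graphql` and `with_args` from `prefect.utilities.graphql` render it to query text. A small illustration of that convention, mirroring the task-run queries above; the ID is a placeholder:

```python
from prefect.utilities.graphql import parse_graphql, with_args

query = {
    "query": {
        with_args("task_run_by_pk", {"id": "<task-run-id>"}): {
            "serialized_state": True,
        }
    }
}

# parse_graphql renders the dict to GraphQL text, roughly:
#   query { task_run_by_pk(id: "<task-run-id>") { serialized_state } }
print(parse_graphql(query))
```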
get_json_state
Gets a JSON-encoded description of the simulation's state. For now, it takes the input and output capacities as arguments because the JSON state is described through relative values (for instance, the first output at 0.3 of its capacity). @param input_capacities An array containing the maximum capacities of the inputs. @param output_capacities An array containing the maximum capacities of the outputs.
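The relative values the docstring describes are simply flow divided by capacity; a tiny illustration with hypothetical numbers:

```python
flow, capacity = 30.0, 100.0
relative = flow / capacity  # 0.3, i.e. "first output at 0.3 capacity"
```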
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2005 onwards University of Deusto # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # # This software consists of contributions made by many individuals, # listed below: # # Author: Luis Rodriguez <luis.rodriguez@opendeusto.es> # import threading import time import json class Watertank(object): """ Watertank Model Output example: {"water": 0.0, "inputs": [0.5, 0.5], "temperatures": [716, 20], "outputs": [1.0]} Changes that have been applied lately to this model (Dec 2015) - There is no longer a separate temperatures mode. Now there is a single model with temperatures. - There are no longer temperature working ranges, temperature warnings, or temperature overloads. The model will not prevent the pumps from working. Instead, the temperature will increase indefinitely. The experiment client can thus deal with temperatures however it wishes (and it can in fact ignore them), with no effect. - As a result of the previous change, temperature is no longer reported as in the [0,1] range according to the range. Now it is reported in raw form. """ def __init__(self, tank_capacity, inputs, outputs, water_level): self.initialize(tank_capacity, inputs, outputs, water_level) def initialize(self, tank_capacity, inputs, outputs, water_level): """ Initializes the simulation with the specified data. @param tank_capacity Capacity of the water tank, in liters. @param Array containing the flow volume of the inputs (such as water pumps), in liters per second. The flow can be modified dynamically, but no inputs can be added. @param Array containing the outputs (such as a water hose or evaporation), in liters per second. The flow can be modified dynamically, but no inputs can be added. @param water_level The starting water level. Value from 0 to 1. """ self.tank_capacity = tank_capacity self.inputs = inputs self.outputs = outputs self.current_volume = water_level * tank_capacity self.firstPumpTemperature = 20 self.secondPumpTemperature = 20 self.firstPumpWorkRange = [20, 200] self.secondPumpWorkRange = [20, 200] self.pumpTemperatureVariationPerSeconds = 6 # Enough for 30 seconds? self.simlock = threading.RLock() self._thread = None self._autoupdating = False self._autoupdating_interval = 1000 def update(self, delta): """ Updates the simulation. Can be done automatically if the autoupdater is used. @param delta Delta in seconds. @see autoupdater_start """ total_output = 0 for out in self.outputs: total_output += out * delta # Calculates how much the pumps are putting in. total_input = 0 # Handle inputs pump1, pump2 = self.inputs # If the first pump is turned on we increase the temperature and the total water input if pump1 > 0: # We multiply by 1.1 so that its temperature raises faster. 
self.firstPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds * 1.1 total_input += pump1 * delta else: self.firstPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds self.firstPumpTemperature = max(20, self.firstPumpTemperature) total_input -= pump1 * delta # If the second pump is turned on we increase the temperature and the total water input if pump2 > 0: self.secondPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds total_input += pump2 * delta else: self.secondPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds self.secondPumpTemperature = max(20, self.secondPumpTemperature) total_input -= pump2 * delta increment = total_input - total_output with self.simlock: self.current_volume += increment # Ensure the volume stays realistic if self.current_volume >= self.tank_capacity: self.current_volume = self.tank_capacity elif self.current_volume < 0: self.current_volume = 0.0 def t_updater(self): """ This internal method is used by the autoupdating thread to update the simulation every few seconds (specified as the autoupdater interval). """ while self._autoupdating: time.sleep(self._autoupdating_interval) self.update(self._autoupdating_interval) def autoupdater_start(self, interval): """ Starts the autoupdating thread. That is, a thread that will call update every so often. If started, it should eventually be stopped. Otherwise, it will run forever in the background. @param interval Interval between updates, in seconds. @see autoupdater_stop """ self._autoupdating = True self._autoupdating_interval = interval self._thread = threading.Thread(None, self.t_updater) self._thread.start() def autoupdater_stop(self): """ Stops the autoupdating thread. This method is non-blocking. It will signal the thread to stop, but may take a while before it *really* does stop. There is a blocking version of this method. @see autoupdater_join """ self._autoupdating = False def autoupdater_join(self): """ Stops the autoupdating thread, and joins that thread until it really does stop. May block forever if for some reason the thread won't stop, but that should not happen. """ self._autoupdating = False self._thread.join(0) def set_input(self, input_number, input_flow): """ Sets the value for an input in the simulation. @param input_number Number identifying the input. The input should exist. @param input_flow New flow of the input, in liters per second. """ with self.simlock: self.inputs[input_number] = input_flow def set_output(self, output_number, output_flow): """ Sets the value for an output in the simulation. @param output_number Number identifying the output. The output should exist. @param output_flow New flow of the output, in liters per second. """ with self.simlock: self.outputs[output_number] = output_flow def set_inputs(self, inputs): """ Redefines the whole array of inputs. @param inputs Array containing the flow of every input. """ with self.simlock: self.inputs = inputs def set_outputs(self, outputs): """ Redefines the whole array of outputs. @param outputs Array containing the flow of every output. """ with self.simlock: self.outputs = outputs def get_temperatures(self): """ Get temperatures. :return: """ return [self.firstPumpTemperature, self.secondPumpTemperature] def get_water_volume(self): """ Gets the current water volume in liters. It will vary dynamically according to the simulation's state. 
""" with self.simlock: return self.current_volume def get_water_level(self): """ Gets the current water level, as a number from 0 to 1 (empty to full). It will vary dynamically according to the simulation's state. """ with self.simlock: return 1.0 * self.current_volume / self.tank_capacity # MASKED: get_json_state function (lines 211-239) if __name__ == '__main__': from mock import patch import unittest def fake_sleep(t): # TODO a = [1 for i in range(100000)] # very fast kludge to add minor delay b = len(a) pass class TestWatertankSimulation(unittest.TestCase): def test_nothing(self): pass def _get_state(self, w): js = w.get_json_state([20, 20], [100]) d = json.loads(js) return d @patch("time.sleep", fake_sleep) def test_waterlevel_increase_decrease(self): w = Watertank(1000, [100, 100], [100], 0.5) w.autoupdater_start(1) initial_level = self._get_state(w)["water"] i = 0 while (i < 15): time.sleep(0.5) i += 1 other_level = self._get_state(w)["water"] # Check that the water level did increase self.assertGreater(other_level, initial_level) w.set_outputs([400]) i = 0 while (i < 15): time.sleep(0.5) i += 1 dec_level = self._get_state(w)["water"] # Check that the water level did decrease self.assertGreater(other_level, dec_level) @patch("time.sleep", fake_sleep) def test_temperature_increase_decrease(self): w = Watertank(1000, [100, 100], [100], 0.5) w.autoupdater_start(1) t0 = self._get_state(w)["temperatures"][0] i = 0 while (i < 15): time.sleep(0.5) i += 1 t1 = self._get_state(w)["temperatures"][0] # Check that the water level did increase self.assertGreater(t1, t0) w.set_inputs([0, 0]) i = 0 while (i < 15): time.sleep(0.5) i += 1 t2 = self._get_state(w)["temperatures"][0] # Check that the water level did decrease self.assertGreater(t1, t2) # @patch("time.sleep", fake_sleep) # def test_first(self): # w = Watertank(1000, [100, 100], [100], 0.5) # w.autoupdater_start(1) # # i = 0 # while (i < 15): # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) # time.sleep(0.5) # i += 1 # # print "...." # i = 0 # w.set_outputs([100]) # w.set_inputs([10, 10]) # while (i < 30): # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) # time.sleep(0.5) # i += 1 # # w.autoupdater_join() # # @patch("time.sleep", fake_sleep) # def test_second(self): # w = Watertank(1000, [100, 100], [100], 0.5) # # i = 0 # while i < 15: # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) # w.update(1) # i += 1 # # print "...." # i = 0 # w.set_outputs([100]) # w.set_inputs([10, 10]) # while i < 15: # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) # w.update(1) # i += 1 unittest.main()
def get_json_state(self, input_capacities, output_capacities): """ Gets a json-encoded description of the simulation's state. As of now, it takes output and input capacities as arguments because the JSON state is described through relative values. (For instance, first output at 0.3 capacity). @param input_capacities An array containing the maximum capacities of the input. @param output_capacities An array containing the maximum capacities of the output. """ if len(self.inputs) != len(input_capacities): return "{}" inputs = [] for inp, cap in zip(self.inputs, input_capacities): inputs.append(1.0 * inp / cap) outputs = [] for inp, cap in zip(self.outputs, output_capacities): outputs.append(1.0 * inp / cap) state = {"water": self.get_water_level(), "inputs": inputs, "outputs": outputs} # Report the RAW temperature temperatures = [0, 0] temperatures[0] = self.firstPumpTemperature temperatures[1] = self.secondPumpTemperature state["temperatures"] = temperatures return json.dumps(state)
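A short driver sketch for the implementation above, stepping the model manually rather than starting the autoupdater thread; the tank size, flow rates, and capacities are arbitrary, and the `Watertank` class defined in this record is assumed to be in scope:

```python
import json

# 1000 L tank, two 100 L/s pumps, one 100 L/s drain, starting half full.
tank = Watertank(1000, [100, 100], [100], 0.5)

for _ in range(5):
    tank.update(1)  # advance the simulation by one second

state = json.loads(tank.get_json_state([100, 100], [100]))
print(state["water"], state["inputs"], state["temperatures"])
```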
211
239
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2005 onwards University of Deusto # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # # This software consists of contributions made by many individuals, # listed below: # # Author: Luis Rodriguez <luis.rodriguez@opendeusto.es> # import threading import time import json class Watertank(object): """ Watertank Model Output example: {"water": 0.0, "inputs": [0.5, 0.5], "temperatures": [716, 20], "outputs": [1.0]} Changes that have been applied lately to this model (Dec 2015) - There is no longer a separate temperatures mode. Now there is a single model with temperatures. - There are no longer temperature working ranges, temperature warnings, or temperature overloads. The model will not prevent the pumps from working. Instead, the temperature will increase indefinitely. The experiment client can thus deal with temperatures however it wishes (and it can in fact ignore them), with no effect. - As a result of the previous change, temperature is no longer reported as in the [0,1] range according to the range. Now it is reported in raw form. """ def __init__(self, tank_capacity, inputs, outputs, water_level): self.initialize(tank_capacity, inputs, outputs, water_level) def initialize(self, tank_capacity, inputs, outputs, water_level): """ Initializes the simulation with the specified data. @param tank_capacity Capacity of the water tank, in liters. @param Array containing the flow volume of the inputs (such as water pumps), in liters per second. The flow can be modified dynamically, but no inputs can be added. @param Array containing the outputs (such as a water hose or evaporation), in liters per second. The flow can be modified dynamically, but no inputs can be added. @param water_level The starting water level. Value from 0 to 1. """ self.tank_capacity = tank_capacity self.inputs = inputs self.outputs = outputs self.current_volume = water_level * tank_capacity self.firstPumpTemperature = 20 self.secondPumpTemperature = 20 self.firstPumpWorkRange = [20, 200] self.secondPumpWorkRange = [20, 200] self.pumpTemperatureVariationPerSeconds = 6 # Enough for 30 seconds? self.simlock = threading.RLock() self._thread = None self._autoupdating = False self._autoupdating_interval = 1000 def update(self, delta): """ Updates the simulation. Can be done automatically if the autoupdater is used. @param delta Delta in seconds. @see autoupdater_start """ total_output = 0 for out in self.outputs: total_output += out * delta # Calculates how much the pumps are putting in. total_input = 0 # Handle inputs pump1, pump2 = self.inputs # If the first pump is turned on we increase the temperature and the total water input if pump1 > 0: # We multiply by 1.1 so that its temperature raises faster. 
self.firstPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds * 1.1 total_input += pump1 * delta else: self.firstPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds self.firstPumpTemperature = max(20, self.firstPumpTemperature) total_input -= pump1 * delta # If the second pump is turned on we increase the temperature and the total water input if pump2 > 0: self.secondPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds total_input += pump2 * delta else: self.secondPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds self.secondPumpTemperature = max(20, self.secondPumpTemperature) total_input -= pump2 * delta increment = total_input - total_output with self.simlock: self.current_volume += increment # Ensure the volume stays realistic if self.current_volume >= self.tank_capacity: self.current_volume = self.tank_capacity elif self.current_volume < 0: self.current_volume = 0.0 def t_updater(self): """ This internal method is used by the autoupdating thread to update the simulation every few seconds (specified as the autoupdater interval). """ while self._autoupdating: time.sleep(self._autoupdating_interval) self.update(self._autoupdating_interval) def autoupdater_start(self, interval): """ Starts the autoupdating thread. That is, a thread that will call update every so often. If started, it should eventually be stopped. Otherwise, it will run forever in the background. @param interval Interval between updates, in seconds. @see autoupdater_stop """ self._autoupdating = True self._autoupdating_interval = interval self._thread = threading.Thread(None, self.t_updater) self._thread.start() def autoupdater_stop(self): """ Stops the autoupdating thread. This method is non-blocking. It will signal the thread to stop, but may take a while before it *really* does stop. There is a blocking version of this method. @see autoupdater_join """ self._autoupdating = False def autoupdater_join(self): """ Stops the autoupdating thread, and joins that thread until it really does stop. May block forever if for some reason the thread won't stop, but that should not happen. """ self._autoupdating = False self._thread.join(0) def set_input(self, input_number, input_flow): """ Sets the value for an input in the simulation. @param input_number Number identifying the input. The input should exist. @param input_flow New flow of the input, in liters per second. """ with self.simlock: self.inputs[input_number] = input_flow def set_output(self, output_number, output_flow): """ Sets the value for an output in the simulation. @param output_number Number identifying the output. The output should exist. @param output_flow New flow of the output, in liters per second. """ with self.simlock: self.outputs[output_number] = output_flow def set_inputs(self, inputs): """ Redefines the whole array of inputs. @param inputs Array containing the flow of every input. """ with self.simlock: self.inputs = inputs def set_outputs(self, outputs): """ Redefines the whole array of outputs. @param outputs Array containing the flow of every output. """ with self.simlock: self.outputs = outputs def get_temperatures(self): """ Get temperatures. :return: """ return [self.firstPumpTemperature, self.secondPumpTemperature] def get_water_volume(self): """ Gets the current water volume in liters. It will vary dynamically according to the simulation's state. 
""" with self.simlock: return self.current_volume def get_water_level(self): """ Gets the current water level, as a number from 0 to 1 (empty to full). It will vary dynamically according to the simulation's state. """ with self.simlock: return 1.0 * self.current_volume / self.tank_capacity def get_json_state(self, input_capacities, output_capacities): """ Gets a json-encoded description of the simulation's state. As of now, it takes output and input capacities as arguments because the JSON state is described through relative values. (For instance, first output at 0.3 capacity). @param input_capacities An array containing the maximum capacities of the input. @param output_capacities An array containing the maximum capacities of the output. """ if len(self.inputs) != len(input_capacities): return "{}" inputs = [] for inp, cap in zip(self.inputs, input_capacities): inputs.append(1.0 * inp / cap) outputs = [] for inp, cap in zip(self.outputs, output_capacities): outputs.append(1.0 * inp / cap) state = {"water": self.get_water_level(), "inputs": inputs, "outputs": outputs} # Report the RAW temperature temperatures = [0, 0] temperatures[0] = self.firstPumpTemperature temperatures[1] = self.secondPumpTemperature state["temperatures"] = temperatures return json.dumps(state) if __name__ == '__main__': from mock import patch import unittest def fake_sleep(t): # TODO a = [1 for i in range(100000)] # very fast kludge to add minor delay b = len(a) pass class TestWatertankSimulation(unittest.TestCase): def test_nothing(self): pass def _get_state(self, w): js = w.get_json_state([20, 20], [100]) d = json.loads(js) return d @patch("time.sleep", fake_sleep) def test_waterlevel_increase_decrease(self): w = Watertank(1000, [100, 100], [100], 0.5) w.autoupdater_start(1) initial_level = self._get_state(w)["water"] i = 0 while (i < 15): time.sleep(0.5) i += 1 other_level = self._get_state(w)["water"] # Check that the water level did increase self.assertGreater(other_level, initial_level) w.set_outputs([400]) i = 0 while (i < 15): time.sleep(0.5) i += 1 dec_level = self._get_state(w)["water"] # Check that the water level did decrease self.assertGreater(other_level, dec_level) @patch("time.sleep", fake_sleep) def test_temperature_increase_decrease(self): w = Watertank(1000, [100, 100], [100], 0.5) w.autoupdater_start(1) t0 = self._get_state(w)["temperatures"][0] i = 0 while (i < 15): time.sleep(0.5) i += 1 t1 = self._get_state(w)["temperatures"][0] # Check that the water level did increase self.assertGreater(t1, t0) w.set_inputs([0, 0]) i = 0 while (i < 15): time.sleep(0.5) i += 1 t2 = self._get_state(w)["temperatures"][0] # Check that the water level did decrease self.assertGreater(t1, t2) # @patch("time.sleep", fake_sleep) # def test_first(self): # w = Watertank(1000, [100, 100], [100], 0.5) # w.autoupdater_start(1) # # i = 0 # while (i < 15): # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) # time.sleep(0.5) # i += 1 # # print "...." # i = 0 # w.set_outputs([100]) # w.set_inputs([10, 10]) # while (i < 30): # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) # time.sleep(0.5) # i += 1 # # w.autoupdater_join() # # @patch("time.sleep", fake_sleep) # def test_second(self): # w = Watertank(1000, [100, 100], [100], 0.5) # # i = 0 # while i < 15: # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) # w.update(1) # i += 1 # # print "...." 
# i = 0 # w.set_outputs([100]) # w.set_inputs([10, 10]) # while i < 15: # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) # w.update(1) # i += 1 unittest.main()
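As a quick illustration of how the model above can be driven (a sketch with hypothetical values, not part of the original file): the simulation can be stepped manually through update() instead of the autoupdater thread, which makes the clamping of the water volume and of the pump temperatures easy to observe.

# Step the simulation by hand instead of calling autoupdater_start().
w = Watertank(1000, [100, 100], [100], 0.5)

for _ in range(5):
    w.update(1)                      # 1-second steps: +200 L/s in, -100 L/s out

print(w.get_water_volume())          # 1000 -- clamped at tank capacity
print(w.get_temperatures())          # both pumps have heated up above 20

w.set_inputs([0, 0])                 # switch both pumps off
w.update(10)                         # drain through the 100 L/s output
print(w.get_water_level())           # 0.0 -- clamped at empty
print(w.get_json_state([100, 100], [100]))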
do_deactivate_realm
Deactivate this realm. Do NOT deactivate the users -- we need to be able to tell the difference between users that were intentionally deactivated, e.g. by a realm admin, and users who can't currently use Zulip because their realm has been deactivated.
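Before the module source that follows (where the function itself is masked), here is a minimal hedged sketch of what an implementation of do_deactivate_realm could look like. It is an illustration only, not the masked code: it assumes the helpers imported by the module (settings, downgrade_now, timezone_now, ujson, RealmAuditLog with REALM_DEACTIVATED and ROLE_COUNT attributes, realm_user_count_by_role, ScheduledEmail, active_humans_in_realm, delete_user_sessions, send_event, active_user_ids) behave as their names suggest.

def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
    # Sketch only -- event type names and payload fields are assumptions.
    if realm.deactivated:
        return

    realm.deactivated = True
    realm.save(update_fields=["deactivated"])

    if settings.BILLING_ENABLED:
        downgrade_now(realm)

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED,
        event_time=event_time, acting_user=acting_user,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
        }))

    # Clear pending emails and log everyone out, but leave the users active so
    # intentional per-user deactivations remain distinguishable.
    ScheduledEmail.objects.filter(realm=realm).delete()
    for user in active_humans_in_realm(realm):
        delete_user_sessions(user)

    event = dict(type="realm", op="deactivated", realm_id=realm.id)
    send_event(realm, event, active_user_ids(realm.id))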
import datetime import itertools import logging import os import platform import time from collections import defaultdict from operator import itemgetter from typing import ( AbstractSet, Any, Callable, Dict, Iterable, List, Mapping, MutableMapping, Optional, Sequence, Set, Tuple, Union, ) import django.db.utils import ujson from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ValidationError from django.core.files import File from django.db import IntegrityError, connection, transaction from django.db.models import Count, Exists, F, Max, OuterRef, Q, Sum from django.db.models.query import QuerySet from django.utils.html import escape from django.utils.timezone import now as timezone_now from django.utils.translation import override as override_language from django.utils.translation import ugettext as _ from psycopg2.extras import execute_values from psycopg2.sql import SQL from typing_extensions import TypedDict from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat from analytics.models import StreamCount from confirmation import settings as confirmation_settings from confirmation.models import ( Confirmation, confirmation_url, create_confirmation_link, generate_key, ) from zerver.decorator import statsd_increment from zerver.lib import retention as retention from zerver.lib.addressee import Addressee from zerver.lib.alert_words import ( add_user_alert_words, get_alert_word_automaton, remove_user_alert_words, ) from zerver.lib.avatar import avatar_url, avatar_url_from_dict from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config from zerver.lib.bulk_create import bulk_create_users from zerver.lib.cache import ( bot_dict_fields, cache_delete, cache_delete_many, cache_set, cache_set_many, cache_with_key, delete_user_profile_caches, display_recipient_cache_key, flush_user_profile, to_dict_cache_key_id, user_profile_by_api_key_cache_key, user_profile_by_email_cache_key, ) from zerver.lib.context_managers import lockfile from zerver.lib.create_user import create_user, get_display_email_address from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper from zerver.lib.email_notifications import enqueue_welcome_emails from zerver.lib.email_validation import ( email_reserved_for_system_bots_error, get_existing_user_errors, get_realm_email_validator, validate_email_is_valid, ) from zerver.lib.emoji import get_emoji_file_name from zerver.lib.exceptions import ( ErrorCode, JsonableError, MarkdownRenderingException, StreamDoesNotExistError, StreamWithIDDoesNotExistError, ) from zerver.lib.export import get_realm_exports_serialized from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS from zerver.lib.hotspots import get_next_hotspots from zerver.lib.i18n import get_language_name from zerver.lib.markdown import MentionData, topic_links from zerver.lib.markdown import version as markdown_version from zerver.lib.message import ( MessageDict, access_message, render_markdown, truncate_body, truncate_topic, update_first_visible_message_id, ) from zerver.lib.pysa import mark_sanitized from zerver.lib.queue import queue_json_publish from zerver.lib.realm_icon import realm_icon_url from zerver.lib.realm_logo import get_realm_logo_data from zerver.lib.retention import move_messages_to_archive from zerver.lib.send_email import ( FromAddress, clear_scheduled_emails, clear_scheduled_invitation_emails, send_email, 
send_email_to_admins, ) from zerver.lib.server_initialization import create_internal_realm, server_initialized from zerver.lib.sessions import delete_user_sessions from zerver.lib.storage import static_path from zerver.lib.stream_recipient import StreamRecipientMap from zerver.lib.stream_subscription import ( get_active_subscriptions_for_stream_id, get_active_subscriptions_for_stream_ids, get_bulk_stream_subscriber_info, get_stream_subscriptions_for_user, get_stream_subscriptions_for_users, get_subscribed_stream_ids_for_user, num_subscribers_for_stream_id, ) from zerver.lib.stream_topic import StreamTopicTarget from zerver.lib.streams import ( access_stream_for_send_message, check_stream_name, create_stream_if_needed, get_default_value_for_history_public_to_subscribers, render_stream_description, send_stream_creation_event, subscribed_to_stream, ) from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime from zerver.lib.topic import ( LEGACY_PREV_TOPIC, ORIG_TOPIC, TOPIC_LINKS, TOPIC_NAME, filter_by_exact_message_topic, filter_by_topic_name_via_message, save_message_for_edit_use_case, update_messages_for_topic_edit, ) from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute from zerver.lib.types import ProfileFieldData from zerver.lib.upload import ( claim_attachment, delete_avatar_image, delete_export_tarball, delete_message_image, upload_emoji_image, ) from zerver.lib.user_groups import access_user_group_by_id, create_user_group from zerver.lib.user_status import update_user_status from zerver.lib.users import ( check_bot_name_available, check_full_name, format_user_row, get_api_key, user_profile_to_user_row, ) from zerver.lib.utils import generate_api_key, log_statsd_event from zerver.lib.validator import check_widget_content from zerver.lib.widget import do_widget_post_save_actions from zerver.models import ( MAX_MESSAGE_LENGTH, Attachment, Client, CustomProfileField, CustomProfileFieldValue, DefaultStream, DefaultStreamGroup, EmailChangeStatus, Message, MultiuseInvite, PreregistrationUser, Reaction, Realm, RealmAuditLog, RealmDomain, RealmEmoji, RealmFilter, Recipient, ScheduledEmail, ScheduledMessage, Service, Stream, SubMessage, Subscription, UserActivity, UserActivityInterval, UserGroup, UserGroupMembership, UserHotspot, UserMessage, UserPresence, UserProfile, UserStatus, active_non_guest_user_ids, active_user_ids, custom_profile_fields_for_realm, filter_to_valid_prereg_users, get_active_streams, get_bot_dicts_in_realm, get_bot_services, get_client, get_default_stream_groups, get_huddle_recipient, get_huddle_user_ids, get_old_unclaimed_attachments, get_stream, get_stream_by_id_in_realm, get_stream_cache_key, get_system_bot, get_user_by_delivery_email, get_user_by_id_in_realm_including_cross_realm, get_user_profile_by_id, is_cross_realm_bot_email, query_for_ids, realm_filters_for_realm, stream_name_in_use, validate_attachment_request, ) from zerver.tornado.event_queue import send_event if settings.BILLING_ENABLED: from corporate.lib.stripe import downgrade_now, update_license_ledger_if_needed # This will be used to type annotate parameters in a function if the function # works on both str and unicode in python 2 but in python 3 it only works on str. 
SizedTextIterable = Union[Sequence[str], AbstractSet[str]] ONBOARDING_TOTAL_MESSAGES = 1000 ONBOARDING_UNREAD_MESSAGES = 20 STREAM_ASSIGNMENT_COLORS = [ "#76ce90", "#fae589", "#a6c7e5", "#e79ab5", "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5", "#f5ce6e", "#c2726a", "#94c849", "#bd86e5", "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063", "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4", "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"] def subscriber_info(user_id: int) -> Dict[str, Any]: return { 'id': user_id, 'flags': ['read'] } # Store an event in the log for re-importing messages def log_event(event: MutableMapping[str, Any]) -> None: if settings.EVENT_LOG_DIR is None: return if "timestamp" not in event: event["timestamp"] = time.time() if not os.path.exists(settings.EVENT_LOG_DIR): os.mkdir(settings.EVENT_LOG_DIR) template = os.path.join(settings.EVENT_LOG_DIR, '%s.' + platform.node() + timezone_now().strftime('.%Y-%m-%d')) with lockfile(template % ('lock',)): with open(template % ('events',), 'a') as log: log.write(ujson.dumps(event) + '\n') def can_access_stream_user_ids(stream: Stream) -> Set[int]: # return user ids of users who can access the attributes of # a stream, such as its name/description. if stream.is_public(): # For a public stream, this is everyone in the realm # except unsubscribed guest users return public_stream_user_ids(stream) else: # for a private stream, it's subscribers plus realm admins. return private_stream_user_ids( stream.id) | {user.id for user in stream.realm.get_admin_users_and_bots()} def private_stream_user_ids(stream_id: int) -> Set[int]: # TODO: Find similar queries elsewhere and de-duplicate this code. subscriptions = get_active_subscriptions_for_stream_id(stream_id) return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')} def public_stream_user_ids(stream: Stream) -> Set[int]: guest_subscriptions = get_active_subscriptions_for_stream_id( stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST) guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')} return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]: is_private_bot = ( user_profile.default_sending_stream and user_profile.default_sending_stream.invite_only or user_profile.default_events_register_stream and user_profile.default_events_register_stream.invite_only) if is_private_bot: return {user_profile.bot_owner_id} else: users = {user.id for user in user_profile.realm.get_human_admin_users()} users.add(user_profile.bot_owner_id) return users def realm_user_count(realm: Realm) -> int: return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count() def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]: human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0, UserProfile.ROLE_REALM_OWNER: 0, UserProfile.ROLE_MEMBER: 0, UserProfile.ROLE_GUEST: 0} for value_dict in list(UserProfile.objects.filter( realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role'))): human_counts[value_dict['role']] = value_dict['role__count'] bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count() return { RealmAuditLog.ROLE_COUNT_HUMANS: human_counts, RealmAuditLog.ROLE_COUNT_BOTS: bot_count, } def get_signups_stream(realm: Realm) -> Stream: # This one-liner helps us work around a lint rule. 
return get_stream("signups", realm) def notify_new_user(user_profile: UserProfile) -> None: sender_email = settings.NOTIFICATION_BOT sender = get_system_bot(sender_email) user_count = realm_user_count(user_profile.realm) signup_notifications_stream = user_profile.realm.get_signup_notifications_stream() # Send notification to realm signup notifications stream if it exists # Don't send notification for the first user in a realm if signup_notifications_stream is not None and user_count > 1: with override_language(user_profile.realm.default_language): message = _("{user} just signed up for Zulip. (total: {user_count})").format( user=f"@_**{user_profile.full_name}|{user_profile.id}**", user_count=user_count ) internal_send_stream_message( user_profile.realm, sender, signup_notifications_stream, _("signups"), message ) # We also send a notification to the Zulip administrative realm admin_realm = sender.realm try: # Check whether the stream exists signups_stream = get_signups_stream(admin_realm) with override_language(admin_realm.default_language): # We intentionally use the same strings as above to avoid translation burden. message = _("{user} just signed up for Zulip. (total: {user_count})").format( user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count ) internal_send_stream_message( admin_realm, sender, signups_stream, user_profile.realm.display_subdomain, message ) except Stream.DoesNotExist: # If the signups stream hasn't been created in the admin # realm, don't auto-create it to send to it; just do nothing. pass def notify_invites_changed(user_profile: UserProfile) -> None: event = dict(type="invites_changed") admin_ids = [user.id for user in user_profile.realm.get_admin_users_and_bots()] send_event(user_profile.realm, event, admin_ids) def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None: """Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public streams, so you have something to look at in your home view once you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES are marked unread. """ one_week_ago = timezone_now() - datetime.timedelta(weeks=1) recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only] recent_messages = Message.objects.filter(recipient_id__in=recipient_ids, date_sent__gt=one_week_ago).order_by("-id") message_ids_to_use = list(reversed(recent_messages.values_list( 'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES])) if len(message_ids_to_use) == 0: return # Handle the race condition where a message arrives between # bulk_add_subscriptions above and the Message query just above already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use, user_profile=user_profile).values_list("message_id", flat=True)) # Mark the newest ONBOARDING_UNREAD_MESSAGES as unread. 
marked_unread = 0 ums_to_create = [] for message_id in reversed(message_ids_to_use): if message_id in already_ids: continue um = UserMessage(user_profile=user_profile, message_id=message_id) if marked_unread < ONBOARDING_UNREAD_MESSAGES: marked_unread += 1 else: um.flags = UserMessage.flags.read ums_to_create.append(um) UserMessage.objects.bulk_create(reversed(ums_to_create)) # Does the processing for a new user account: # * Subscribes to default/invitation streams # * Fills in some recent historical messages # * Notifies other users in realm and Zulip about the signup # * Deactivates PreregistrationUser objects # * subscribe the user to newsletter if newsletter_data is specified def process_new_human_user(user_profile: UserProfile, prereg_user: Optional[PreregistrationUser]=None, newsletter_data: Optional[Mapping[str, str]]=None, default_stream_groups: Sequence[DefaultStreamGroup]=[], realm_creation: bool=False) -> None: mit_beta_user = user_profile.realm.is_zephyr_mirror_realm if prereg_user is not None: prereg_user.status = confirmation_settings.STATUS_ACTIVE prereg_user.save(update_fields=['status']) streams = prereg_user.streams.all() acting_user: Optional[UserProfile] = prereg_user.referred_by else: streams = [] acting_user = None # If the user's invitation didn't explicitly list some streams, we # add the default streams if len(streams) == 0: streams = get_default_subs(user_profile) for default_stream_group in default_stream_groups: default_stream_group_streams = default_stream_group.streams.all() for stream in default_stream_group_streams: if stream not in streams: streams.append(stream) bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user) add_new_user_history(user_profile, streams) # mit_beta_users don't have a referred_by field if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None: # This is a cross-realm private message. with override_language(prereg_user.referred_by.default_language): internal_send_private_message( user_profile.realm, get_system_bot(settings.NOTIFICATION_BOT), prereg_user.referred_by, _("{user} accepted your invitation to join Zulip!").format(user=f"{user_profile.full_name} <`{user_profile.email}`>") ) # Mark any other PreregistrationUsers that are STATUS_ACTIVE as # inactive so we can keep track of the PreregistrationUser we # actually used for analytics if prereg_user is not None: PreregistrationUser.objects.filter( email__iexact=user_profile.delivery_email).exclude(id=prereg_user.id)\ .update(status=confirmation_settings.STATUS_REVOKED) if prereg_user.referred_by is not None: notify_invites_changed(user_profile) else: PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email)\ .update(status=confirmation_settings.STATUS_REVOKED) notify_new_user(user_profile) # Clear any scheduled invitation emails to prevent them # from being sent after the user is created. clear_scheduled_invitation_emails(user_profile.delivery_email) if user_profile.realm.send_welcome_emails: enqueue_welcome_emails(user_profile, realm_creation) # We have an import loop here; it's intentional, because we want # to keep all the onboarding code in zerver/lib/onboarding.py. 
from zerver.lib.onboarding import send_initial_pms send_initial_pms(user_profile) if newsletter_data is not None: # If the user was created automatically via the API, we may # not want to register them for the newsletter queue_json_publish( "signups", { 'email_address': user_profile.delivery_email, 'user_id': user_profile.id, 'merge_fields': { 'NAME': user_profile.full_name, 'REALM_ID': user_profile.realm_id, 'OPTIN_IP': newsletter_data["IP"], 'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)), }, }, lambda event: None) def notify_created_user(user_profile: UserProfile) -> None: user_row = user_profile_to_user_row(user_profile) person = format_user_row(user_profile.realm, user_profile, user_row, # Since we don't know what the client # supports at this point in the code, we # just assume client_gravatar and # user_avatar_url_field_optional = False :( client_gravatar=False, user_avatar_url_field_optional=False, # We assume there's no custom profile # field data for a new user; initial # values are expected to be added in a # later event. custom_profile_field_data={}) event: Dict[str, Any] = dict(type="realm_user", op="add", person=person) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]: def stream_name(stream: Optional[Stream]) -> Optional[str]: if not stream: return None return stream.name default_sending_stream_name = stream_name(user_profile.default_sending_stream) default_events_register_stream_name = stream_name(user_profile.default_events_register_stream) bot = dict(email=user_profile.email, user_id=user_profile.id, full_name=user_profile.full_name, bot_type=user_profile.bot_type, is_active=user_profile.is_active, api_key=get_api_key(user_profile), default_sending_stream=default_sending_stream_name, default_events_register_stream=default_events_register_stream_name, default_all_public_streams=user_profile.default_all_public_streams, avatar_url=avatar_url(user_profile), services = get_service_dicts_for_bot(user_profile.id), ) # Set the owner key only when the bot has an owner. # The default bots don't have an owner. So don't # set the owner key while reactivating them. 
if user_profile.bot_owner is not None: bot['owner_id'] = user_profile.bot_owner.id return dict(type="realm_bot", op="add", bot=bot) def notify_created_bot(user_profile: UserProfile) -> None: event = created_bot_event(user_profile) send_event(user_profile.realm, event, bot_owner_user_ids(user_profile)) def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None: user_set = set() for full_name, email in name_list: user_set.add((email, full_name, True)) bulk_create_users(realm, user_set, bot_type) def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str, bot_type: Optional[int]=None, role: Optional[int]=None, bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None, timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR, default_sending_stream: Optional[Stream]=None, default_events_register_stream: Optional[Stream]=None, default_all_public_streams: Optional[bool]=None, prereg_user: Optional[PreregistrationUser]=None, newsletter_data: Optional[Dict[str, str]]=None, default_stream_groups: Sequence[DefaultStreamGroup]=[], source_profile: Optional[UserProfile]=None, realm_creation: bool=False, acting_user: Optional[UserProfile]=None) -> UserProfile: user_profile = create_user(email=email, password=password, realm=realm, full_name=full_name, role=role, bot_type=bot_type, bot_owner=bot_owner, tos_version=tos_version, timezone=timezone, avatar_source=avatar_source, default_sending_stream=default_sending_stream, default_events_register_stream=default_events_register_stream, default_all_public_streams=default_all_public_streams, source_profile=source_profile) event_time = user_profile.date_joined if not acting_user: acting_user = user_profile RealmAuditLog.objects.create( realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_CREATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) # Note that for bots, the caller will send an additional event # with bot-specific info like services. 
notify_created_user(user_profile) if bot_type is None: process_new_human_user(user_profile, prereg_user=prereg_user, newsletter_data=newsletter_data, default_stream_groups=default_stream_groups, realm_creation=realm_creation) return user_profile def do_activate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None: user_profile.is_active = True user_profile.is_mirror_dummy = False user_profile.set_unusable_password() user_profile.date_joined = timezone_now() user_profile.tos_version = settings.TOS_VERSION user_profile.save(update_fields=["is_active", "date_joined", "password", "is_mirror_dummy", "tos_version"]) event_time = user_profile.date_joined RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) notify_created_user(user_profile) def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None: # Unlike do_activate_user, this is meant for re-activating existing users, # so it doesn't reset their password, etc. user_profile.is_active = True user_profile.save(update_fields=["is_active"]) event_time = timezone_now() RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) notify_created_user(user_profile) if user_profile.is_bot: notify_created_bot(user_profile) def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]: return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False) def do_set_realm_property(realm: Realm, name: str, value: Any, acting_user: Optional[UserProfile] = None) -> None: """Takes in a realm object, the name of an attribute to update, the value to update and and the user who initiated the update. """ property_type = Realm.property_types[name] assert isinstance(value, property_type), ( f'Cannot update {name}: {value} is not an instance of {property_type}') old_value = getattr(realm, name) setattr(realm, name, value) realm.save(update_fields=[name]) event = dict( type='realm', op='update', property=name, value=value, ) send_event(realm, event, active_user_ids(realm.id)) event_time = timezone_now() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=event_time, acting_user=acting_user, extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: {'property': name, 'value': old_value}, RealmAuditLog.NEW_VALUE: {'property': name, 'value': value} })) if name == "email_address_visibility": if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]: # We use real email addresses on UserProfile.email only if # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so # changes between values that will not require changing # that field, so we can save work and return here. 
return user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False) for user_profile in user_profiles: user_profile.email = get_display_email_address(user_profile, realm) # TODO: Design a bulk event for this or force-reload all clients send_user_email_update_event(user_profile) UserProfile.objects.bulk_update(user_profiles, ['email']) for user_profile in user_profiles: flush_user_profile(sender=UserProfile, instance=user_profile) def do_set_realm_authentication_methods(realm: Realm, authentication_methods: Dict[str, bool], acting_user: Optional[UserProfile]=None) -> None: old_value = realm.authentication_methods_dict() for key, value in list(authentication_methods.items()): index = getattr(realm.authentication_methods, key).number realm.authentication_methods.set_bit(index, int(value)) realm.save(update_fields=['authentication_methods']) updated_value = realm.authentication_methods_dict() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=timezone_now(), acting_user=acting_user, extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: {'property': 'authentication_methods', 'value': old_value}, RealmAuditLog.NEW_VALUE: {'property': 'authentication_methods', 'value': updated_value} })) event = dict( type="realm", op="update_dict", property='default', data=dict(authentication_methods=updated_value), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_message_editing(realm: Realm, allow_message_editing: bool, message_content_edit_limit_seconds: int, allow_community_topic_editing: bool) -> None: realm.allow_message_editing = allow_message_editing realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds realm.allow_community_topic_editing = allow_community_topic_editing realm.save(update_fields=['allow_message_editing', 'allow_community_topic_editing', 'message_content_edit_limit_seconds', ], ) event = dict( type="realm", op="update_dict", property="default", data=dict(allow_message_editing=allow_message_editing, message_content_edit_limit_seconds=message_content_edit_limit_seconds, allow_community_topic_editing=allow_community_topic_editing), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_message_deleting(realm: Realm, message_content_delete_limit_seconds: int) -> None: realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds realm.save(update_fields=['message_content_delete_limit_seconds']) event = dict( type="realm", op="update_dict", property="default", data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None: realm.notifications_stream = stream realm.save(update_fields=['notifications_stream']) event = dict( type="realm", op="update", property="notifications_stream_id", value=stream_id, ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_signup_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None: realm.signup_notifications_stream = stream realm.save(update_fields=['signup_notifications_stream']) event = dict( type="realm", op="update", property="signup_notifications_stream_id", value=stream_id, ) send_event(realm, event, active_user_ids(realm.id)) # MASKED: do_deactivate_realm function (lines 795-827) def do_reactivate_realm(realm: Realm) -> None: realm.deactivated = False realm.save(update_fields=["deactivated"]) 
event_time = timezone_now() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm), })) def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None: realm.string_id = new_subdomain realm.save(update_fields=["string_id"]) def do_scrub_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None: users = UserProfile.objects.filter(realm=realm) for user in users: do_delete_messages_by_sender(user) do_delete_avatar_image(user, acting_user=acting_user) user.full_name = f"Scrubbed {generate_key()[:15]}" scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}" user.email = scrubbed_email user.delivery_email = scrubbed_email user.save(update_fields=["full_name", "email", "delivery_email"]) do_remove_realm_custom_profile_fields(realm) Attachment.objects.filter(realm=realm).delete() RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(), acting_user=acting_user, event_type=RealmAuditLog.REALM_SCRUBBED) def do_deactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None, _cascade: bool=True) -> None: if not user_profile.is_active: return if user_profile.realm.is_zephyr_mirror_realm: # nocoverage # For zephyr mirror users, we need to make them a mirror dummy # again; otherwise, other users won't get the correct behavior # when trying to send messages to this person inside Zulip. # # Ideally, we need to also ensure their zephyr mirroring bot # isn't running, but that's a separate issue. user_profile.is_mirror_dummy = True user_profile.is_active = False user_profile.save(update_fields=["is_active"]) delete_user_sessions(user_profile) clear_scheduled_emails([user_profile.id]) event_time = timezone_now() RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time, increment=-1) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) event = dict(type="realm_user", op="remove", person=dict(user_id=user_profile.id, full_name=user_profile.full_name)) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) if user_profile.is_bot: event = dict(type="realm_bot", op="remove", bot=dict(user_id=user_profile.id, full_name=user_profile.full_name)) send_event(user_profile.realm, event, bot_owner_user_ids(user_profile)) if _cascade: bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile) for profile in bot_profiles: do_deactivate_user(profile, acting_user=acting_user, _cascade=False) def do_deactivate_stream(stream: Stream, log: bool=True, acting_user: Optional[UserProfile]=None) -> None: # Get the affected user ids *before* we deactivate everybody. affected_user_ids = can_access_stream_user_ids(stream) get_active_subscriptions_for_stream_id(stream.id).update(active=False) was_invite_only = stream.invite_only stream.deactivated = True stream.invite_only = True # Preserve as much as possible the original stream name while giving it a # special prefix that both indicates that the stream is deactivated and # frees up the original name for reuse. 
old_name = stream.name new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH] for i in range(20): if stream_name_in_use(new_name, stream.realm_id): # This stream has already been deactivated, keep prepending !s until # we have a unique stream name or you've hit a rename limit. new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH] else: break # If you don't have a unique name at this point, this will fail later in the # code path. stream.name = new_name[:Stream.MAX_NAME_LENGTH] stream.save(update_fields=['name', 'deactivated', 'invite_only']) # If this is a default stream, remove it, properly sending a # notification to browser clients. if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists(): do_remove_default_stream(stream) default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id) for group in default_stream_groups_for_stream: do_remove_streams_from_default_stream_group(stream.realm, group, [stream]) # Remove the old stream information from remote cache. old_cache_key = get_stream_cache_key(old_name, stream.realm_id) cache_delete(old_cache_key) stream_dict = stream.to_dict() stream_dict.update(dict(name=old_name, invite_only=was_invite_only)) event = dict(type="stream", op="delete", streams=[stream_dict]) send_event(stream.realm, event, affected_user_ids) event_time = timezone_now() RealmAuditLog.objects.create(realm=stream.realm, acting_user=acting_user, modified_stream=stream, event_type=RealmAuditLog.STREAM_DEACTIVATED, event_time=event_time) def send_user_email_update_event(user_profile: UserProfile) -> None: payload = dict(user_id=user_profile.id, new_email=user_profile.email) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None: delete_user_profile_caches([user_profile]) user_profile.delivery_email = new_email if user_profile.email_address_is_realm_public(): user_profile.email = new_email user_profile.save(update_fields=["email", "delivery_email"]) else: user_profile.save(update_fields=["delivery_email"]) # We notify just the target user (and eventually org admins, only # when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS) # about their new delivery email, since that field is private. payload = dict(user_id=user_profile.id, delivery_email=new_email) event = dict(type='realm_user', op='update', person=payload) send_event(user_profile.realm, event, [user_profile.id]) if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR: # If the user is using Gravatar to manage their email address, # their Gravatar just changed, and we need to notify other # clients. notify_avatar_url_change(user_profile) if user_profile.email_address_is_realm_public(): # Additionally, if we're also changing the publicly visible # email, we send a new_email event as well. 
send_user_email_update_event(user_profile) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED, event_time=event_time) def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None: old_email = user_profile.delivery_email obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email, user_profile=user_profile, realm=user_profile.realm) activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE) from zerver.context_processors import common_context context = common_context(user_profile) context.update({ 'old_email': old_email, 'new_email': new_email, 'activate_url': activation_url, }) language = user_profile.default_language send_email('zerver/emails/confirm_new_email', to_emails=[new_email], from_name=FromAddress.security_email_from_name(language=language), from_address=FromAddress.tokenized_no_reply_address(), language=language, context=context, realm=user_profile.realm) def compute_irc_user_fullname(email: str) -> str: return email.split("@")[0] + " (IRC)" def compute_jabber_user_fullname(email: str) -> str: return email.split("@")[0] + " (XMPP)" @cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email), timeout=3600*24*7) def create_mirror_user_if_needed(realm: Realm, email: str, email_to_fullname: Callable[[str], str]) -> UserProfile: try: return get_user_by_delivery_email(email, realm) except UserProfile.DoesNotExist: try: # Forge a user for this person return create_user( email=email, password=None, realm=realm, full_name=email_to_fullname(email), active=False, is_mirror_dummy=True, ) except IntegrityError: return get_user_by_delivery_email(email, realm) def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None: welcome_bot = get_system_bot(settings.WELCOME_BOT) human_recipient_id = message['message'].sender.recipient_id if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2: content = ( _("Congratulations on your first reply!") + " " ":tada:" "\n" "\n" + _("Feel free to continue using this space to practice your new messaging " "skills. 
Or, try clicking on some of the stream names to your left!") ) internal_send_private_message( message['realm'], welcome_bot, message['message'].sender, content) def render_incoming_message(message: Message, content: str, user_ids: Set[int], realm: Realm, mention_data: Optional[MentionData]=None, email_gateway: bool=False) -> str: realm_alert_words_automaton = get_alert_word_automaton(realm) try: rendered_content = render_markdown( message=message, content=content, realm=realm, realm_alert_words_automaton = realm_alert_words_automaton, mention_data=mention_data, email_gateway=email_gateway, ) except MarkdownRenderingException: raise JsonableError(_('Unable to render message')) return rendered_content class RecipientInfoResult(TypedDict): active_user_ids: Set[int] push_notify_user_ids: Set[int] stream_email_user_ids: Set[int] stream_push_user_ids: Set[int] wildcard_mention_user_ids: Set[int] um_eligible_user_ids: Set[int] long_term_idle_user_ids: Set[int] default_bot_user_ids: Set[int] service_bot_tuples: List[Tuple[int, int]] def get_recipient_info(recipient: Recipient, sender_id: int, stream_topic: Optional[StreamTopicTarget], possibly_mentioned_user_ids: AbstractSet[int]=set(), possible_wildcard_mention: bool=True) -> RecipientInfoResult: stream_push_user_ids: Set[int] = set() stream_email_user_ids: Set[int] = set() wildcard_mention_user_ids: Set[int] = set() if recipient.type == Recipient.PERSONAL: # The sender and recipient may be the same id, so # de-duplicate using a set. message_to_user_ids = list({recipient.type_id, sender_id}) assert(len(message_to_user_ids) in [1, 2]) elif recipient.type == Recipient.STREAM: # Anybody calling us w/r/t a stream message needs to supply # stream_topic. We may eventually want to have different versions # of this function for different message types. assert(stream_topic is not None) user_ids_muting_topic = stream_topic.user_ids_muting_topic() subscription_rows = stream_topic.get_active_subscriptions().annotate( user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'), user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'), user_profile_wildcard_mentions_notify=F( 'user_profile__wildcard_mentions_notify'), ).values( 'user_profile_id', 'push_notifications', 'email_notifications', 'wildcard_mentions_notify', 'user_profile_email_notifications', 'user_profile_push_notifications', 'user_profile_wildcard_mentions_notify', 'is_muted', ).order_by('user_profile_id') message_to_user_ids = [ row['user_profile_id'] for row in subscription_rows ] def should_send(setting: str, row: Dict[str, Any]) -> bool: # This implements the structure that the UserProfile stream notification settings # are defaults, which can be overridden by the stream-level settings (if those # values are not null). 
if row['is_muted']: return False if row['user_profile_id'] in user_ids_muting_topic: return False if row[setting] is not None: return row[setting] return row['user_profile_' + setting] stream_push_user_ids = { row['user_profile_id'] for row in subscription_rows # Note: muting a stream overrides stream_push_notify if should_send('push_notifications', row) } stream_email_user_ids = { row['user_profile_id'] for row in subscription_rows # Note: muting a stream overrides stream_email_notify if should_send('email_notifications', row) } if possible_wildcard_mention: # If there's a possible wildcard mention, we need to # determine which users would receive a wildcard mention # notification for this message should the message indeed # contain a wildcard mention. # # We don't have separate values for push/email # notifications here; at this stage, we're just # determining whether this wildcard mention should be # treated as a mention (and follow the user's mention # notification preferences) or a normal message. wildcard_mention_user_ids = { row['user_profile_id'] for row in subscription_rows if should_send("wildcard_mentions_notify", row) } elif recipient.type == Recipient.HUDDLE: message_to_user_ids = get_huddle_user_ids(recipient) else: raise ValueError('Bad recipient type') message_to_user_id_set = set(message_to_user_ids) user_ids = set(message_to_user_id_set) # Important note: Because we haven't rendered markdown yet, we # don't yet know which of these possibly-mentioned users was # actually mentioned in the message (in other words, the # mention syntax might have been in a code block or otherwise # escaped). `get_ids_for` will filter these extra user rows # for our data structures not related to bots user_ids |= possibly_mentioned_user_ids if user_ids: query = UserProfile.objects.filter( is_active=True, ).values( 'id', 'enable_online_push_notifications', 'is_bot', 'bot_type', 'long_term_idle', ) # query_for_ids is fast highly optimized for large queries, and we # need this codepath to be fast (it's part of sending messages) query = query_for_ids( query=query, user_ids=sorted(list(user_ids)), field='id', ) rows = list(query) else: # TODO: We should always have at least one user_id as a recipient # of any message we send. Right now the exception to this # rule is `notify_new_user`, which, at least in a possibly # contrived test scenario, can attempt to send messages # to an inactive bot. When we plug that hole, we can avoid # this `else` clause and just `assert(user_ids)`. # # UPDATE: It's February 2020 (and a couple years after the above # comment was written). We have simplified notify_new_user # so that it should be a little easier to reason about. # There is currently some cleanup to how we handle cross # realm bots that is still under development. Once that # effort is complete, we should be able to address this # to-do. rows = [] def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]: """Only includes users on the explicit message to line""" return { row['id'] for row in rows if f(row) } & message_to_user_id_set def is_service_bot(row: Dict[str, Any]) -> bool: return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES) active_user_ids = get_ids_for(lambda r: True) push_notify_user_ids = get_ids_for( lambda r: r['enable_online_push_notifications'], ) # Service bots don't get UserMessage rows. 
um_eligible_user_ids = get_ids_for( lambda r: not is_service_bot(r), ) long_term_idle_user_ids = get_ids_for( lambda r: r['long_term_idle'], ) # These two bot data structures need to filter from the full set # of users who either are receiving the message or might have been # mentioned in it, and so can't use get_ids_for. # # Further in the do_send_messages code path, once # `mentioned_user_ids` has been computed via markdown, we'll filter # these data structures for just those users who are either a # direct recipient or were mentioned; for now, we're just making # sure we have the data we need for that without extra database # queries. default_bot_user_ids = { row['id'] for row in rows if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT } service_bot_tuples = [ (row['id'], row['bot_type']) for row in rows if is_service_bot(row) ] info: RecipientInfoResult = dict( active_user_ids=active_user_ids, push_notify_user_ids=push_notify_user_ids, stream_push_user_ids=stream_push_user_ids, stream_email_user_ids=stream_email_user_ids, wildcard_mention_user_ids=wildcard_mention_user_ids, um_eligible_user_ids=um_eligible_user_ids, long_term_idle_user_ids=long_term_idle_user_ids, default_bot_user_ids=default_bot_user_ids, service_bot_tuples=service_bot_tuples, ) return info def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]], mentioned_user_ids: Set[int], active_user_ids: Set[int], recipient_type: int) -> Dict[str, List[Dict[str, Any]]]: event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list) # Avoid infinite loops by preventing messages sent by bots from generating # Service events. if sender.is_bot: return event_dict def maybe_add_event(user_profile_id: int, bot_type: int) -> None: if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: queue_name = 'outgoing_webhooks' elif bot_type == UserProfile.EMBEDDED_BOT: queue_name = 'embedded_bots' else: logging.error( 'Unexpected bot_type for Service bot id=%s: %s', user_profile_id, bot_type, ) return is_stream = (recipient_type == Recipient.STREAM) # Important note: service_bot_tuples may contain service bots # who were not actually mentioned in the message (e.g. if # mention syntax for that bot appeared in a code block). # Thus, it is important to filter any users who aren't part of # either mentioned_user_ids (the actual mentioned users) or # active_user_ids (the actual recipients). # # So even though this is implied by the logic below, we filter # these not-actually-mentioned users here, to help keep this # function future-proof. 
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids: return # Mention triggers, for stream messages if is_stream and user_profile_id in mentioned_user_ids: trigger = 'mention' # PM triggers for personal and huddle messages elif (not is_stream) and (user_profile_id in active_user_ids): trigger = 'private_message' else: return event_dict[queue_name].append({ 'trigger': trigger, 'user_profile_id': user_profile_id, }) for user_profile_id, bot_type in service_bot_tuples: maybe_add_event( user_profile_id=user_profile_id, bot_type=bot_type, ) return event_dict def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]: scheduled_messages: List[ScheduledMessage] = [] for message in messages: scheduled_message = ScheduledMessage() scheduled_message.sender = message['message'].sender scheduled_message.recipient = message['message'].recipient topic_name = message['message'].topic_name() scheduled_message.set_topic_name(topic_name=topic_name) scheduled_message.content = message['message'].content scheduled_message.sending_client = message['message'].sending_client scheduled_message.stream = message['stream'] scheduled_message.realm = message['realm'] scheduled_message.scheduled_timestamp = message['deliver_at'] if message['delivery_type'] == 'send_later': scheduled_message.delivery_type = ScheduledMessage.SEND_LATER elif message['delivery_type'] == 'remind': scheduled_message.delivery_type = ScheduledMessage.REMIND scheduled_messages.append(scheduled_message) ScheduledMessage.objects.bulk_create(scheduled_messages) return [scheduled_message.id for scheduled_message in scheduled_messages] def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]], email_gateway: bool=False, mark_as_read: Sequence[int]=[]) -> List[int]: """See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html for high-level documentation on this subsystem. 
""" # Filter out messages which didn't pass internal_prep_message properly messages = [message for message in messages_maybe_none if message is not None] # Filter out zephyr mirror anomalies where the message was already sent already_sent_ids: List[int] = [] new_messages: List[MutableMapping[str, Any]] = [] for message in messages: if isinstance(message['message'], int): already_sent_ids.append(message['message']) else: new_messages.append(message) messages = new_messages links_for_embed: Set[str] = set() # For consistency, changes to the default values for these gets should also be applied # to the default args in do_send_message for message in messages: message['rendered_content'] = message.get('rendered_content', None) message['stream'] = message.get('stream', None) message['local_id'] = message.get('local_id', None) message['sender_queue_id'] = message.get('sender_queue_id', None) message['realm'] = message.get('realm', message['message'].sender.realm) mention_data = MentionData( realm_id=message['realm'].id, content=message['message'].content, ) message['mention_data'] = mention_data if message['message'].is_stream_message(): stream_id = message['message'].recipient.type_id stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget( stream_id=stream_id, topic_name=message['message'].topic_name(), ) else: stream_topic = None info = get_recipient_info( recipient=message['message'].recipient, sender_id=message['message'].sender_id, stream_topic=stream_topic, possibly_mentioned_user_ids=mention_data.get_user_ids(), possible_wildcard_mention=mention_data.message_has_wildcards(), ) message['active_user_ids'] = info['active_user_ids'] message['push_notify_user_ids'] = info['push_notify_user_ids'] message['stream_push_user_ids'] = info['stream_push_user_ids'] message['stream_email_user_ids'] = info['stream_email_user_ids'] message['um_eligible_user_ids'] = info['um_eligible_user_ids'] message['long_term_idle_user_ids'] = info['long_term_idle_user_ids'] message['default_bot_user_ids'] = info['default_bot_user_ids'] message['service_bot_tuples'] = info['service_bot_tuples'] # Render our messages. assert message['message'].rendered_content is None rendered_content = render_incoming_message( message['message'], message['message'].content, message['active_user_ids'], message['realm'], mention_data=message['mention_data'], email_gateway=email_gateway, ) message['message'].rendered_content = rendered_content message['message'].rendered_content_version = markdown_version links_for_embed |= message['message'].links_for_preview # Add members of the mentioned user groups into `mentions_user_ids`. for group_id in message['message'].mentions_user_group_ids: members = message['mention_data'].get_group_members(group_id) message['message'].mentions_user_ids.update(members) # Only send data to Tornado about wildcard mentions if message # rendering determined the message had an actual wildcard # mention in it (and not e.g. wildcard mention syntax inside a # code block). if message['message'].mentions_wildcard: message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids'] else: message['wildcard_mention_user_ids'] = [] ''' Once we have the actual list of mentioned ids from message rendering, we can patch in "default bots" (aka normal bots) who were directly mentioned in this message as eligible to get UserMessage rows. 
''' mentioned_user_ids = message['message'].mentions_user_ids default_bot_user_ids = message['default_bot_user_ids'] mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids message['um_eligible_user_ids'] |= mentioned_bot_user_ids # Save the message receipts in the database user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict) with transaction.atomic(): Message.objects.bulk_create([message['message'] for message in messages]) # Claim attachments in message for message in messages: if do_claim_attachments(message['message'], message['message'].potential_attachment_path_ids): message['message'].has_attachment = True message['message'].save(update_fields=['has_attachment']) ums: List[UserMessageLite] = [] for message in messages: # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows; # they will be processed later. mentioned_user_ids = message['message'].mentions_user_ids user_messages = create_user_messages( message=message['message'], um_eligible_user_ids=message['um_eligible_user_ids'], long_term_idle_user_ids=message['long_term_idle_user_ids'], stream_push_user_ids = message['stream_push_user_ids'], stream_email_user_ids = message['stream_email_user_ids'], mentioned_user_ids=mentioned_user_ids, mark_as_read=mark_as_read, ) for um in user_messages: user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list() ums.extend(user_messages) message['message'].service_queue_events = get_service_bot_events( sender=message['message'].sender, service_bot_tuples=message['service_bot_tuples'], mentioned_user_ids=mentioned_user_ids, active_user_ids=message['active_user_ids'], recipient_type=message['message'].recipient.type, ) bulk_insert_ums(ums) for message in messages: do_widget_post_save_actions(message) for message in messages: realm_id: Optional[int] = None if message['message'].is_stream_message(): if message['stream'] is None: stream_id = message['message'].recipient.type_id message['stream'] = Stream.objects.select_related().get(id=stream_id) assert message['stream'] is not None # assert needed because stubs for django are missing realm_id = message['stream'].realm_id # Deliver events to the real-time push system, as well as # enqueuing any additional processing triggered by the message. wide_message_dict = MessageDict.wide_dict(message['message'], realm_id) user_flags = user_message_flags.get(message['message'].id, {}) sender = message['message'].sender message_type = wide_message_dict['type'] presence_idle_user_ids = get_active_presence_idle_user_ids( realm=sender.realm, sender_id=sender.id, message_type=message_type, active_user_ids=message['active_user_ids'], user_flags=user_flags, ) event = dict( type='message', message=message['message'].id, message_dict=wide_message_dict, presence_idle_user_ids=presence_idle_user_ids, ) ''' TODO: We may want to limit user_ids to only those users who have UserMessage rows, if only for minor performance reasons. For now we queue events for all subscribers/sendees of the message, since downstream code may still do notifications that don't require UserMessage rows. Our automated tests have gotten better on this codepath, but we may have coverage gaps, so we should be careful about changing the next line. 
''' user_ids = message['active_user_ids'] | set(user_flags.keys()) users = [ dict( id=user_id, flags=user_flags.get(user_id, []), always_push_notify=(user_id in message['push_notify_user_ids']), stream_push_notify=(user_id in message['stream_push_user_ids']), stream_email_notify=(user_id in message['stream_email_user_ids']), wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']), ) for user_id in user_ids ] if message['message'].is_stream_message(): # Note: This is where authorization for single-stream # get_updates happens! We only attach stream data to the # notify new_message request if it's a public stream, # ensuring that in the tornado server, non-public stream # messages are only associated to their subscribed users. assert message['stream'] is not None # assert needed because stubs for django are missing if message['stream'].is_public(): event['realm_id'] = message['stream'].realm_id event['stream_name'] = message['stream'].name if message['stream'].invite_only: event['invite_only'] = True if message['stream'].first_message_id is None: message['stream'].first_message_id = message['message'].id message['stream'].save(update_fields=["first_message_id"]) if message['local_id'] is not None: event['local_id'] = message['local_id'] if message['sender_queue_id'] is not None: event['sender_queue_id'] = message['sender_queue_id'] send_event(message['realm'], event, users) if links_for_embed: event_data = { 'message_id': message['message'].id, 'message_content': message['message'].content, 'message_realm_id': message['realm'].id, 'urls': links_for_embed} queue_json_publish('embed_links', event_data) if message['message'].recipient.type == Recipient.PERSONAL: welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id if (welcome_bot_id in message['active_user_ids'] and welcome_bot_id != message['message'].sender_id): send_welcome_bot_response(message) for queue_name, events in message['message'].service_queue_events.items(): for event in events: queue_json_publish( queue_name, { "message": wide_message_dict, "trigger": event['trigger'], "user_profile_id": event["user_profile_id"], }, ) # Note that this does not preserve the order of message ids # returned. In practice, this shouldn't matter, as we only # mirror single zephyr messages at a time and don't otherwise # intermingle sending zephyr messages with other messages. return already_sent_ids + [message['message'].id for message in messages] class UserMessageLite: ''' The Django ORM is too slow for bulk operations. This class is optimized for the simple use case of inserting a bunch of rows into zerver_usermessage. 
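    A minimal sketch of constructing one row (ids illustrative; flags is
    the raw bitfield, and 0 means no flags are set):

        um = UserMessageLite(user_profile_id=1, message_id=2, flags=0)
        um.flags_list()  # -> []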
''' def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None: self.user_profile_id = user_profile_id self.message_id = message_id self.flags = flags def flags_list(self) -> List[str]: return UserMessage.flags_list_for_flags(self.flags) def create_user_messages(message: Message, um_eligible_user_ids: AbstractSet[int], long_term_idle_user_ids: AbstractSet[int], stream_push_user_ids: AbstractSet[int], stream_email_user_ids: AbstractSet[int], mentioned_user_ids: AbstractSet[int], mark_as_read: Sequence[int] = []) -> List[UserMessageLite]: ums_to_create = [] for user_profile_id in um_eligible_user_ids: um = UserMessageLite( user_profile_id=user_profile_id, message_id=message.id, flags=0, ) ums_to_create.append(um) # These properties on the Message are set via # render_markdown by code in the markdown inline patterns wildcard = message.mentions_wildcard ids_with_alert_words = message.user_ids_with_alert_words for um in ums_to_create: if (um.user_profile_id == message.sender.id and message.sent_by_human()) or \ um.user_profile_id in mark_as_read: um.flags |= UserMessage.flags.read if wildcard: um.flags |= UserMessage.flags.wildcard_mentioned if um.user_profile_id in mentioned_user_ids: um.flags |= UserMessage.flags.mentioned if um.user_profile_id in ids_with_alert_words: um.flags |= UserMessage.flags.has_alert_word if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]: um.flags |= UserMessage.flags.is_private # For long_term_idle (aka soft-deactivated) users, we are allowed # to optimize by lazily not creating UserMessage rows that would # have the default 0 flag set (since the soft-reactivation logic # knows how to create those when the user comes back). We need to # create the UserMessage rows for these long_term_idle users # non-lazily in a few cases: # # * There are nonzero flags (e.g. the user was mentioned), since # that case is rare and this saves a lot of complexity in # soft-reactivation. # # * If the user is going to be notified (e.g. they get push/email # notifications for every message on a stream), since in that # case the notifications code will call `access_message` on the # message to re-verify permissions, and for private streams, # will get an error if the UserMessage row doesn't exist yet. # # See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation # for details on this system. user_messages = [] for um in ums_to_create: if (um.user_profile_id in long_term_idle_user_ids and um.user_profile_id not in stream_push_user_ids and um.user_profile_id not in stream_email_user_ids and message.is_stream_message() and int(um.flags) == 0): continue user_messages.append(um) return user_messages def bulk_insert_ums(ums: List[UserMessageLite]) -> None: ''' Doing bulk inserts this way is much faster than using Django, since we don't have any ORM overhead. Profiling with 1000 users shows a speedup of 0.436 -> 0.027 seconds, so we're talking about a 15x speedup. 
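    Each UserMessageLite becomes the 3-tuple (user_profile_id, message_id,
    flags) that feeds the single INSERT ... VALUES statement below through
    psycopg2's execute_values.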
''' if not ums: return vals = [ (um.user_profile_id, um.message_id, um.flags) for um in ums ] query = SQL(''' INSERT into zerver_usermessage (user_profile_id, message_id, flags) VALUES %s ''') with connection.cursor() as cursor: execute_values(cursor.cursor, query, vals) def do_add_submessage(realm: Realm, sender_id: int, message_id: int, msg_type: str, content: str, ) -> None: submessage = SubMessage( sender_id=sender_id, message_id=message_id, msg_type=msg_type, content=content, ) submessage.save() event = dict( type="submessage", msg_type=msg_type, message_id=message_id, submessage_id=submessage.id, sender_id=sender_id, content=content, ) ums = UserMessage.objects.filter(message_id=message_id) target_user_ids = [um.user_profile_id for um in ums] send_event(realm, event, target_user_ids) def notify_reaction_update(user_profile: UserProfile, message: Message, reaction: Reaction, op: str) -> None: user_dict = {'user_id': user_profile.id, 'email': user_profile.email, 'full_name': user_profile.full_name} event: Dict[str, Any] = { 'type': 'reaction', 'op': op, 'user_id': user_profile.id, # TODO: We plan to remove this redundant user_dict object once # clients are updated to support accessing use user_id. See # https://github.com/zulip/zulip/pull/14711 for details. 'user': user_dict, 'message_id': message.id, 'emoji_name': reaction.emoji_name, 'emoji_code': reaction.emoji_code, 'reaction_type': reaction.reaction_type, } # Update the cached message since new reaction is added. update_to_dict_cache([message]) # Recipients for message update events, including reactions, are # everyone who got the original message. This means reactions # won't live-update in preview narrows, but it's the right # performance tradeoff, since otherwise we'd need to send all # reactions to public stream messages to every browser for every # client in the organization, which doesn't scale. # # However, to ensure that reactions do live-update for any user # who has actually participated in reacting to a message, we add a # "historical" UserMessage row for any user who reacts to message, # subscribing them to future notifications. ums = UserMessage.objects.filter(message=message.id) send_event(user_profile.realm, event, [um.user_profile_id for um in ums]) def do_add_reaction(user_profile: UserProfile, message: Message, emoji_name: str, emoji_code: str, reaction_type: str) -> None: reaction = Reaction(user_profile=user_profile, message=message, emoji_name=emoji_name, emoji_code=emoji_code, reaction_type=reaction_type) try: reaction.save() except django.db.utils.IntegrityError: # nocoverage # This can happen when a race results in the check in views # code not catching an attempt to double-add a reaction, or # perhaps if the emoji_name/emoji_code mapping is busted. 
raise JsonableError(_("Reaction already exists.")) notify_reaction_update(user_profile, message, reaction, "add") def do_remove_reaction(user_profile: UserProfile, message: Message, emoji_code: str, reaction_type: str) -> None: reaction = Reaction.objects.filter(user_profile=user_profile, message=message, emoji_code=emoji_code, reaction_type=reaction_type).get() reaction.delete() notify_reaction_update(user_profile, message, reaction, "remove") def do_send_typing_notification( realm: Realm, sender: UserProfile, recipient_user_profiles: List[UserProfile], operator: str) -> None: sender_dict = {'user_id': sender.id, 'email': sender.email} # Include a list of recipients in the event body to help identify where the typing is happening recipient_dicts = [{'user_id': profile.id, 'email': profile.email} for profile in recipient_user_profiles] event = dict( type='typing', op=operator, sender=sender_dict, recipients=recipient_dicts, ) # Only deliver the notification to active user recipients user_ids_to_notify = [ user.id for user in recipient_user_profiles if user.is_active ] send_event(realm, event, user_ids_to_notify) # check_send_typing_notification: # Checks the typing notification and sends it def check_send_typing_notification(sender: UserProfile, user_ids: List[int], operator: str) -> None: realm = sender.realm if len(user_ids) == 0: raise JsonableError(_('Missing parameter: \'to\' (recipient)')) elif operator not in ('start', 'stop'): raise JsonableError(_('Invalid \'op\' value (should be start or stop)')) ''' The next chunk of code will go away when we upgrade old mobile users away from versions of mobile that send emails. For the small number of very outdated mobile clients, we do double work here in terms of fetching users, but this structure reduces lots of other unnecessary duplicated code and will make it convenient to mostly delete code when we desupport old versions of the app.''' if sender.id not in user_ids: user_ids.append(sender.id) # If any of the user_ids being sent in are invalid, we will # just reject the whole request, since a partial list of user_ids # can create confusion related to huddles. Plus it's a good # sign that a client is confused (or possibly even malicious) if # we get bad user_ids. user_profiles = [] for user_id in user_ids: try: # We include cross-bot realms as possible recipients, # so that clients can know which huddle conversation # is relevant here. user_profile = get_user_by_id_in_realm_including_cross_realm( user_id, sender.realm) except UserProfile.DoesNotExist: raise JsonableError(_("Invalid user ID {}").format(user_id)) user_profiles.append(user_profile) do_send_typing_notification( realm=realm, sender=sender, recipient_user_profiles=user_profiles, operator=operator, ) def ensure_stream(realm: Realm, stream_name: str, invite_only: bool=False, stream_description: str="", acting_user: Optional[UserProfile]=None) -> Stream: return create_stream_if_needed(realm, stream_name, invite_only=invite_only, stream_description=stream_description, acting_user=acting_user)[0] def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile], forwarded_mirror_message: bool, forwarder_user_profile: Optional[UserProfile], sender: UserProfile) -> Recipient: # Avoid mutating the passed in list of recipient_profiles. 
recipient_profiles_map = {} for user_profile in recipient_profiles: recipient_profiles_map[user_profile.id] = user_profile if forwarded_mirror_message: # In our mirroring integrations with some third-party # protocols, bots subscribed to the third-party protocol # forward to Zulip messages that they received in the # third-party service. The permissions model for that # forwarding is that users can only submit to Zulip private # messages they personally received, and here we do the check # for whether forwarder_user_profile is among the private # message recipients of the message. assert forwarder_user_profile is not None if forwarder_user_profile.id not in recipient_profiles_map: raise ValidationError(_("User not authorized for this query")) # If the private message is just between the sender and # another person, force it to be a personal internally if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map): del recipient_profiles_map[sender.id] assert len(recipient_profiles_map) != 0 if len(recipient_profiles_map) == 1: user_profile = list(recipient_profiles_map.values())[0] return user_profile.recipient # Otherwise, we need a huddle. Make sure the sender is included in huddle messages recipient_profiles_map[sender.id] = sender user_ids: Set[int] = {user_id for user_id in recipient_profiles_map} return get_huddle_recipient(user_ids) def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile], sender: UserProfile, allow_deactivated: bool=False) -> Sequence[UserProfile]: recipient_profiles_map: Dict[int, UserProfile] = {} # We exempt cross-realm bots from the check that all the recipients # are in the same realm. realms = set() if not is_cross_realm_bot_email(sender.email): realms.add(sender.realm_id) for user_profile in user_profiles: if (not user_profile.is_active and not user_profile.is_mirror_dummy and not allow_deactivated) or user_profile.realm.deactivated: raise ValidationError(_("'{email}' is no longer using Zulip.").format(email=user_profile.email)) recipient_profiles_map[user_profile.id] = user_profile if not is_cross_realm_bot_email(user_profile.email): realms.add(user_profile.realm_id) if len(realms) > 1: raise ValidationError(_("You can't send private messages outside of your organization.")) return list(recipient_profiles_map.values()) def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool, forwarder_user_profile: Optional[UserProfile], sender: UserProfile, allow_deactivated: bool=False) -> Recipient: recipient_profiles = validate_recipient_user_profiles(user_profiles, sender, allow_deactivated=allow_deactivated) return get_recipient_from_user_profiles(recipient_profiles, forwarded_mirror_message, forwarder_user_profile, sender) def already_sent_mirrored_message_id(message: Message) -> Optional[int]: if message.recipient.type == Recipient.HUDDLE: # For huddle messages, we use a 10-second window because the # timestamps aren't guaranteed to actually match between two # copies of the same message. 
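        # Outside of huddles we instead require an exact timestamp match
        # (the zero-second time_window in the else branch below).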
time_window = datetime.timedelta(seconds=10) else: time_window = datetime.timedelta(seconds=0) query = Message.objects.filter( sender=message.sender, recipient=message.recipient, content=message.content, sending_client=message.sending_client, date_sent__gte=message.date_sent - time_window, date_sent__lte=message.date_sent + time_window) messages = filter_by_exact_message_topic( query=query, message=message, ) if messages.exists(): return messages[0].id return None def extract_stream_indicator(s: str) -> Union[str, int]: # Users can pass stream name as either an id or a name, # and if they choose to pass a name, they may JSON encode # it for legacy reasons. try: data = ujson.loads(s) except (ValueError, TypeError): # If there was no JSON encoding, then we just # have a raw stream name. return s # We should stop supporting this odd use case # once we improve our documentation. if isinstance(data, list): if len(data) != 1: # nocoverage raise JsonableError(_("Expected exactly one stream")) data = data[0] if isinstance(data, str): # We had a JSON-encoded stream name. return data if isinstance(data, int): # We had a stream id. return data raise JsonableError(_("Invalid data type for stream")) def extract_private_recipients(s: str) -> Union[List[str], List[int]]: # We try to accept multiple incoming formats for recipients. # See test_extract_recipients() for examples of what we allow. try: data = ujson.loads(s) except (ValueError, TypeError): data = s if isinstance(data, str): data = data.split(',') if not isinstance(data, list): raise JsonableError(_("Invalid data type for recipients")) if not data: # We don't complain about empty message recipients here return data if isinstance(data[0], str): return get_validated_emails(data) if not isinstance(data[0], int): raise JsonableError(_("Invalid data type for recipients")) return get_validated_user_ids(data) def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]: for user_id in user_ids: if not isinstance(user_id, int): raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both.")) return list(set(user_ids)) def get_validated_emails(emails: Iterable[str]) -> List[str]: for email in emails: if not isinstance(email, str): raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both.")) return list(filter(bool, {email.strip() for email in emails})) def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str, topic: str, body: str, realm: Optional[Realm]=None) -> int: addressee = Addressee.for_stream_name(stream_name, topic) message = check_message(sender, client, addressee, body, realm) return do_send_messages([message])[0] def check_send_private_message(sender: UserProfile, client: Client, receiving_user: UserProfile, body: str) -> int: addressee = Addressee.for_user_profile(receiving_user) message = check_message(sender, client, addressee, body) return do_send_messages([message])[0] # check_send_message: # Returns the id of the sent message. Has same argspec as check_message. 
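# An illustrative call for a stream message (hypothetical stream/topic
# names; the sender and client objects are assumed to exist):
#
#     message_id = check_send_message(
#         sender, get_client("website"), "stream",
#         ["Denmark"], "greetings", "hello world",
#     )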
def check_send_message(sender: UserProfile, client: Client, message_type_name: str, message_to: Union[Sequence[int], Sequence[str]], topic_name: Optional[str], message_content: str, realm: Optional[Realm]=None, forged: bool=False, forged_timestamp: Optional[float]=None, forwarder_user_profile: Optional[UserProfile]=None, local_id: Optional[str]=None, sender_queue_id: Optional[str]=None, widget_content: Optional[str]=None) -> int: addressee = Addressee.legacy_build( sender, message_type_name, message_to, topic_name) message = check_message(sender, client, addressee, message_content, realm, forged, forged_timestamp, forwarder_user_profile, local_id, sender_queue_id, widget_content) return do_send_messages([message])[0] def check_schedule_message(sender: UserProfile, client: Client, message_type_name: str, message_to: Union[Sequence[str], Sequence[int]], topic_name: Optional[str], message_content: str, delivery_type: str, deliver_at: datetime.datetime, realm: Optional[Realm]=None, forwarder_user_profile: Optional[UserProfile]=None, ) -> int: addressee = Addressee.legacy_build( sender, message_type_name, message_to, topic_name) message = check_message(sender, client, addressee, message_content, realm=realm, forwarder_user_profile=forwarder_user_profile) message['deliver_at'] = deliver_at message['delivery_type'] = delivery_type recipient = message['message'].recipient if (delivery_type == 'remind' and (recipient.type != Recipient.STREAM and recipient.type_id != sender.id)): raise JsonableError(_("Reminders can only be set for streams.")) return do_schedule_messages([message])[0] def check_default_stream_group_name(group_name: str) -> None: if group_name.strip() == "": raise JsonableError(_("Invalid default stream group name '{}'").format(group_name)) if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH: raise JsonableError(_("Default stream group name too long (limit: {} characters)").format( DefaultStreamGroup.MAX_NAME_LENGTH, )) for i in group_name: if ord(i) == 0: raise JsonableError(_("Default stream group name '{}' contains NULL (0x00) characters.").format( group_name, )) def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile, realm: Realm, content: str) -> None: """ Sends a PM error notification to a bot's owner if one hasn't already been sent in the last 5 minutes. """ if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated: return if not sender.is_bot or sender.bot_owner is None: return # Don't send these notifications for cross-realm bot messages # (e.g. from EMAIL_GATEWAY_BOT) since the owner for # EMAIL_GATEWAY_BOT is probably the server administrator, not # the owner of the bot who could potentially fix the problem. if sender.realm != realm: return # We warn the user once every 5 minutes to avoid a flood of # PMs on a misconfigured integration, re-using the # UserProfile.last_reminder field, which is not used for bots. 
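    # Concretely: with a 5-minute waitperiod, a second failure 3 minutes
    # after the first one produces no additional PM, while one arriving 6
    # minutes later does.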
last_reminder = sender.last_reminder waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD) if last_reminder and timezone_now() - last_reminder <= waitperiod: return internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT), sender.bot_owner, content) sender.last_reminder = timezone_now() sender.save(update_fields=['last_reminder']) def send_pm_if_empty_stream(stream: Optional[Stream], realm: Realm, sender: UserProfile, stream_name: Optional[str]=None, stream_id: Optional[int]=None) -> None: """If a bot sends a message to a stream that doesn't exist or has no subscribers, sends a notification to the bot owner (if not a cross-realm bot) so that the owner can correct the issue.""" if not sender.is_bot or sender.bot_owner is None: return arg_dict = { "bot_identity": f"`{sender.delivery_email}`", "stream_id": stream_id, "stream_name": f"#**{stream_name}**", "new_stream_link": "#streams/new", } if sender.bot_owner is not None: with override_language(sender.bot_owner.default_language): if stream is None: if stream_id is not None: content = _("Your bot {bot_identity} tried to send a message to stream ID " "{stream_id}, but there is no stream with that ID.").format(**arg_dict) else: assert(stream_name is not None) content = _("Your bot {bot_identity} tried to send a message to stream " "{stream_name}, but that stream does not exist. " "Click [here]({new_stream_link}) to create it.").format(**arg_dict) else: if num_subscribers_for_stream_id(stream.id) > 0: return content = _("Your bot {bot_identity} tried to send a message to " "stream {stream_name}. The stream exists but " "does not have any subscribers.").format(**arg_dict) send_rate_limited_pm_notification_to_bot_owner(sender, realm, content) def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm, sender: UserProfile) -> Stream: stream_name = stream_name.strip() check_stream_name(stream_name) try: stream = get_stream(stream_name, realm) send_pm_if_empty_stream(stream, realm, sender) except Stream.DoesNotExist: send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name) raise StreamDoesNotExistError(escape(stream_name)) return stream def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm, sender: UserProfile) -> Stream: try: stream = get_stream_by_id_in_realm(stream_id, realm) send_pm_if_empty_stream(stream, realm, sender) except Stream.DoesNotExist: send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id) raise StreamWithIDDoesNotExistError(stream_id) return stream def check_private_message_policy(realm: Realm, sender: UserProfile, user_profiles: Sequence[UserProfile]) -> None: if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED: if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot): # We allow PMs only between users and bots, to avoid # breaking the tutorial as well as automated # notifications from system bots to users. return raise JsonableError(_("Private messages are disabled in this organization.")) # check_message: # Returns message ready for sending with do_send_message on success or the error message (string) on error. 
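# In practice validation failures raise JsonableError rather than
# returning a string.  The returned dict carries the keys consumed by
# do_send_messages: 'message', 'stream', 'local_id', 'sender_queue_id',
# 'realm', and 'widget_content' (see the return at the end of the function).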
def check_message(sender: UserProfile, client: Client, addressee: Addressee, message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False, forged_timestamp: Optional[float]=None, forwarder_user_profile: Optional[UserProfile]=None, local_id: Optional[str]=None, sender_queue_id: Optional[str]=None, widget_content: Optional[str]=None) -> Dict[str, Any]: """See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html for high-level documentation on this subsystem. """ stream = None message_content = message_content_raw.rstrip() if len(message_content) == 0: raise JsonableError(_("Message must not be empty")) if '\x00' in message_content: raise JsonableError(_("Message must not contain null bytes")) message_content = truncate_body(message_content) if realm is None: realm = sender.realm if addressee.is_stream(): topic_name = addressee.topic() topic_name = truncate_topic(topic_name) stream_name = addressee.stream_name() stream_id = addressee.stream_id() if stream_name is not None: stream = validate_stream_name_with_pm_notification(stream_name, realm, sender) elif stream_id is not None: stream = validate_stream_id_with_pm_notification(stream_id, realm, sender) else: stream = addressee.stream() assert stream is not None recipient = stream.recipient # This will raise JsonableError if there are problems. if sender.bot_type != sender.OUTGOING_WEBHOOK_BOT: access_stream_for_send_message( sender=sender, stream=stream, forwarder_user_profile=forwarder_user_profile) elif addressee.is_private(): user_profiles = addressee.user_profiles() mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"] check_private_message_policy(realm, sender, user_profiles) # API Super-users who set the `forged` flag are allowed to # forge messages sent by any user, so we disable the # `forwarded_mirror_message` security check in that case. forwarded_mirror_message = mirror_message and not forged try: recipient = recipient_for_user_profiles(user_profiles, forwarded_mirror_message, forwarder_user_profile, sender) except ValidationError as e: assert isinstance(e.messages[0], str) raise JsonableError(e.messages[0]) else: # This is defensive code--Addressee already validates # the message type. raise AssertionError("Invalid message type") message = Message() message.sender = sender message.content = message_content message.recipient = recipient if addressee.is_stream(): message.set_topic_name(topic_name) if forged and forged_timestamp is not None: # Forged messages come with a timestamp message.date_sent = timestamp_to_datetime(forged_timestamp) else: message.date_sent = timezone_now() message.sending_client = client # We render messages later in the process. 
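    # (do_send_messages makes the same assertion before calling
    # render_incoming_message on each message.)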
    assert message.rendered_content is None

    if client.name == "zephyr_mirror":
        id = already_sent_mirrored_message_id(message)
        if id is not None:
            return {'message': id}

    if widget_content is not None:
        try:
            widget_content = ujson.loads(widget_content)
        except Exception:
            raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))

        try:
            check_widget_content(widget_content)
        except ValidationError as error:
            raise JsonableError(_('Widgets: {error_msg}').format(
                error_msg=error.message,
            ))

    return {'message': message, 'stream': stream, 'local_id': local_id,
            'sender_queue_id': sender_queue_id, 'realm': realm,
            'widget_content': widget_content}

def _internal_prep_message(realm: Realm,
                           sender: UserProfile,
                           addressee: Addressee,
                           content: str) -> Optional[Dict[str, Any]]:
    """
    Create a message object and check it, but don't send it or save it to the database.
    The internal function that calls this can therefore batch send a bunch of created
    messages together as one database query.
    Call do_send_messages with a list of the return values of this method.
    """
    # Truncate overly long content rather than rejecting it outright.
    if len(content) > MAX_MESSAGE_LENGTH:
        content = content[0:3900] + "\n\n[message was too long and has been truncated]"

    # If we have a stream name, and the stream doesn't exist, we
    # create it here (though this code path should probably be removed
    # eventually, moving that responsibility to the caller).  If
    # addressee.stream_name() is None (i.e. we're sending to a stream
    # by ID), we skip this, as the stream object must already exist.
    if addressee.is_stream():
        stream_name = addressee.stream_name()
        if stream_name is not None:
            ensure_stream(realm, stream_name, acting_user=sender)

    try:
        return check_message(sender, get_client("Internal"), addressee,
                             content, realm=realm)
    except JsonableError as e:
        logging.exception("Error queueing internal message by %s: %s",
                          sender.delivery_email, e.msg)

    return None

def internal_prep_stream_message(
        realm: Realm, sender: UserProfile,
        stream: Stream, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    addressee = Addressee.for_stream(stream, topic)

    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=addressee,
        content=content,
    )

def internal_prep_stream_message_by_name(
        realm: Realm, sender: UserProfile,
        stream_name: str, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    addressee = Addressee.for_stream_name(stream_name, topic)

    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=addressee,
        content=content,
    )

def internal_prep_private_message(realm: Realm,
                                  sender: UserProfile,
                                  recipient_user: UserProfile,
                                  content: str) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
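    Returns None when the message fails validation (e.g. empty content), in
    which case callers such as internal_send_private_message skip sending.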
""" addressee = Addressee.for_user_profile(recipient_user) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_send_private_message(realm: Realm, sender: UserProfile, recipient_user: UserProfile, content: str) -> Optional[int]: message = internal_prep_private_message(realm, sender, recipient_user, content) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def internal_send_stream_message( realm: Realm, sender: UserProfile, stream: Stream, topic: str, content: str, email_gateway: bool=False) -> Optional[int]: message = internal_prep_stream_message( realm, sender, stream, topic, content, ) if message is None: return None message_ids = do_send_messages([message], email_gateway=email_gateway) return message_ids[0] def internal_send_stream_message_by_name( realm: Realm, sender: UserProfile, stream_name: str, topic: str, content: str, ) -> Optional[int]: message = internal_prep_stream_message_by_name( realm, sender, stream_name, topic, content, ) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str], content: str) -> Optional[int]: addressee = Addressee.for_private(emails, realm) message = _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str: # These colors are shared with the palette in subs.js. used_colors = [sub.color for sub in subs if sub.active] available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors] if available_colors: return available_colors[0] else: return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)] def validate_user_access_to_subscribers(user_profile: Optional[UserProfile], stream: Stream) -> None: """ Validates whether the user can view the subscribers of a stream. Raises a JsonableError if: * The user and the stream are in different realms * The realm is MIT and the stream is not invite only. * The stream is invite only, requesting_user is passed, and that user does not subscribe to the stream. """ validate_user_access_to_subscribers_helper( user_profile, {"realm_id": stream.realm_id, "invite_only": stream.invite_only}, # We use a lambda here so that we only compute whether the # user is subscribed if we have to lambda user_profile: subscribed_to_stream(user_profile, stream.id)) def validate_user_access_to_subscribers_helper( user_profile: Optional[UserProfile], stream_dict: Mapping[str, Any], check_user_subscribed: Callable[[UserProfile], bool], ) -> None: """Helper for validate_user_access_to_subscribers that doesn't require a full stream object. This function is a bit hard to read, because it is carefully optimized for performance in the two code paths we call it from: * In `bulk_get_subscriber_user_ids`, we already know whether the user was subscribed via `sub_dict`, and so we want to avoid a database query at all (especially since it calls this in a loop); * In `validate_user_access_to_subscribers`, we want to only check if the user is subscribed when we absolutely have to, since it costs a database query. The `check_user_subscribed` argument is a function that reports whether the user is subscribed to the stream. 
Note also that we raise a ValidationError in cases where the caller is doing the wrong thing (maybe these should be AssertionErrors), and JsonableError for 400 type errors. """ if user_profile is None: raise ValidationError("Missing user to validate access for") if user_profile.realm_id != stream_dict["realm_id"]: raise ValidationError("Requesting user not in given realm") # Guest users can access subscribed public stream's subscribers if user_profile.is_guest: if check_user_subscribed(user_profile): return # We could put an AssertionError here; in that we don't have # any code paths that would allow a guest user to access other # streams in the first place. if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]: raise JsonableError(_("Subscriber data is not available for this stream")) # Organization administrators can view subscribers for all streams. if user_profile.is_realm_admin: return if (stream_dict["invite_only"] and not check_user_subscribed(user_profile)): raise JsonableError(_("Unable to retrieve subscribers for private stream")) def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]], user_profile: UserProfile, sub_dict: Mapping[int, bool], stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]: """sub_dict maps stream_id => whether the user is subscribed to that stream.""" target_stream_dicts = [] for stream_dict in stream_dicts: stream_recipient.populate_with(stream_id=stream_dict["id"], recipient_id=stream_dict["recipient_id"]) try: validate_user_access_to_subscribers_helper( user_profile, stream_dict, lambda user_profile: sub_dict[stream_dict["id"]], ) except JsonableError: continue target_stream_dicts.append(stream_dict) stream_ids = [stream['id'] for stream in target_stream_dicts] recipient_ids = sorted([ stream_recipient.recipient_id_for(stream_id) for stream_id in stream_ids ]) result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts} if not recipient_ids: return result ''' The raw SQL below leads to more than a 2x speedup when tested with 20k+ total subscribers. (For large realms with lots of default streams, this function deals with LOTS of data, so it is important to optimize.) ''' query = SQL(''' SELECT zerver_subscription.recipient_id, zerver_subscription.user_profile_id FROM zerver_subscription INNER JOIN zerver_userprofile ON zerver_userprofile.id = zerver_subscription.user_profile_id WHERE zerver_subscription.recipient_id in %(recipient_ids)s AND zerver_subscription.active AND zerver_userprofile.is_active ORDER BY zerver_subscription.recipient_id, zerver_subscription.user_profile_id ''') cursor = connection.cursor() cursor.execute(query, {"recipient_ids": tuple(recipient_ids)}) rows = cursor.fetchall() cursor.close() recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict() ''' Using groupby/itemgetter here is important for performance, at scale. It makes it so that all interpreter overhead is just O(N) in nature. ''' for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)): user_profile_ids = [r[1] for r in recip_rows] stream_id = recip_to_stream_id[recip_id] result[stream_id] = list(user_profile_ids) return result def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet: # TODO: Make a generic stub for QuerySet """ Build a query to get the subscribers list for a stream, raising a JsonableError if: 'realm' is optional in stream. The caller can refine this query with select_related(), values(), etc. 
depending on whether it wants objects or just certain fields """ validate_user_access_to_subscribers(requesting_user, stream) # Note that non-active users may still have "active" subscriptions, because we # want to be able to easily reactivate them with their old subscriptions. This # is why the query here has to look at the UserProfile.is_active flag. subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter( user_profile__is_active=True, ) return subscriptions def get_subscriber_emails(stream: Stream, requesting_user: Optional[UserProfile]=None) -> List[str]: subscriptions_query = get_subscribers_query(stream, requesting_user) subscriptions = subscriptions_query.values('user_profile__email') return [subscription['user_profile__email'] for subscription in subscriptions] def notify_subscriptions_added(user_profile: UserProfile, sub_pairs: Iterable[Tuple[Subscription, Stream]], stream_user_ids: Callable[[Stream], List[int]], recent_traffic: Dict[int, int], no_log: bool=False) -> None: if not no_log: log_event({'type': 'subscription_added', 'user': user_profile.email, 'names': [stream.name for sub, stream in sub_pairs], 'realm': user_profile.realm.string_id}) sub_dicts = [] for (subscription, stream) in sub_pairs: sub_dict = stream.to_dict() for field_name in Subscription.API_FIELDS: if field_name == "active": # Skip the "active" field, it's implied by context continue sub_dict[field_name] = getattr(subscription, field_name) sub_dict['in_home_view'] = not subscription.is_muted sub_dict['email_address'] = encode_email_address(stream, show_sender=True) sub_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream.id, stream.date_created, recent_traffic) sub_dict['subscribers'] = stream_user_ids(stream) sub_dicts.append(sub_dict) # Send a notification to the user who subscribed. event = dict(type="subscription", op="add", subscriptions=sub_dicts) send_event(user_profile.realm, event, [user_profile.id]) def get_peer_user_ids_for_stream_change(stream: Stream, altered_user_ids: Iterable[int], subscribed_user_ids: Iterable[int]) -> Set[int]: ''' altered_user_ids is the user_ids that we are adding/removing subscribed_user_ids is the already-subscribed user_ids Based on stream policy, we notify the correct bystanders, while not notifying altered_users (who get subscribers via another event) ''' if stream.invite_only: # PRIVATE STREAMS # Realm admins can access all private stream subscribers. Send them an # event even if they aren't subscribed to stream. realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()] user_ids_to_notify = [] user_ids_to_notify.extend(realm_admin_ids) user_ids_to_notify.extend(subscribed_user_ids) return set(user_ids_to_notify) - set(altered_user_ids) else: # PUBLIC STREAMS # We now do "peer_add" or "peer_remove" events even for streams # users were never subscribed to, in order for the neversubscribed # structure to stay up-to-date. 
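        # e.g. (illustrative) with active non-guest user ids {1, 2, 3, 4}
        # and altered_user_ids {2}, the peers notified are {1, 3, 4}.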
return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids) def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]: stream_ids = [stream.id for stream in streams] all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter( user_profile__is_active=True, ).values( 'recipient__type_id', 'user_profile_id', ).order_by( 'recipient__type_id', ) get_stream_id = itemgetter('recipient__type_id') all_subscribers_by_stream: Dict[int, List[int]] = defaultdict(list) for stream_id, rows in itertools.groupby(all_subs, get_stream_id): user_ids = [row['user_profile_id'] for row in rows] all_subscribers_by_stream[stream_id] = user_ids return all_subscribers_by_stream def get_last_message_id() -> int: # We generally use this function to populate RealmAuditLog, and # the max id here is actually systemwide, not per-realm. I # assume there's some advantage in not filtering by realm. last_id = Message.objects.aggregate(Max('id'))['id__max'] if last_id is None: # During initial realm creation, there might be 0 messages in # the database; in that case, the `aggregate` query returns # None. Since we want an int for "beginning of time", use -1. last_id = -1 return last_id SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]] def bulk_add_subscriptions(streams: Iterable[Stream], users: Iterable[UserProfile], color_map: Mapping[str, str]={}, from_stream_creation: bool=False, acting_user: Optional[UserProfile]=None) -> SubT: users = list(users) recipients_map: Dict[int, int] = {stream.id: stream.recipient_id for stream in streams} recipient_ids: List[int] = [recipient_id for recipient_id in recipients_map.values()] stream_map: Dict[int, Stream] = {} for stream in streams: stream_map[recipients_map[stream.id]] = stream subs_by_user: Dict[int, List[Subscription]] = defaultdict(list) all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile') for sub in all_subs_query: subs_by_user[sub.user_profile_id].append(sub) realm = users[0].realm already_subscribed: List[Tuple[UserProfile, Stream]] = [] subs_to_activate: List[Tuple[Subscription, Stream]] = [] new_subs: List[Tuple[UserProfile, int, Stream]] = [] for user_profile in users: needs_new_sub: Set[int] = set(recipient_ids) for sub in subs_by_user[user_profile.id]: if sub.recipient_id in needs_new_sub: needs_new_sub.remove(sub.recipient_id) if sub.active: already_subscribed.append((user_profile, stream_map[sub.recipient_id])) else: subs_to_activate.append((sub, stream_map[sub.recipient_id])) # Mark the sub as active, without saving, so that # pick_color will consider this to be an active # subscription when picking colors sub.active = True for recipient_id in needs_new_sub: new_subs.append((user_profile, recipient_id, stream_map[recipient_id])) subs_to_add: List[Tuple[Subscription, Stream]] = [] for (user_profile, recipient_id, stream) in new_subs: if stream.name in color_map: color = color_map[stream.name] else: color = pick_color(user_profile, subs_by_user[user_profile.id]) sub_to_add = Subscription(user_profile=user_profile, active=True, color=color, recipient_id=recipient_id) subs_by_user[user_profile.id].append(sub_to_add) subs_to_add.append((sub_to_add, stream)) # TODO: XXX: This transaction really needs to be done at the serializeable # transaction isolation level. 
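    # (At READ COMMITTED, two concurrent calls could presumably both see a
    # stream as unoccupied before either commits, and then both send the
    # "occupy" event computed from occupied_streams_before/after below.)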
with transaction.atomic(): occupied_streams_before = list(get_occupied_streams(realm)) Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add]) sub_ids = [sub.id for (sub, stream) in subs_to_activate] Subscription.objects.filter(id__in=sub_ids).update(active=True) occupied_streams_after = list(get_occupied_streams(realm)) # Log Subscription Activities in RealmAuditLog event_time = timezone_now() event_last_message_id = get_last_message_id() all_subscription_logs: (List[RealmAuditLog]) = [] for (sub, stream) in subs_to_add: all_subscription_logs.append(RealmAuditLog(realm=realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_CREATED, event_time=event_time)) for (sub, stream) in subs_to_activate: all_subscription_logs.append(RealmAuditLog(realm=realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED, event_time=event_time)) # Now since we have all log objects generated we can do a bulk insert RealmAuditLog.objects.bulk_create(all_subscription_logs) new_occupied_streams = [stream for stream in set(occupied_streams_after) - set(occupied_streams_before) if not stream.invite_only] if new_occupied_streams and not from_stream_creation: event: Dict[str, object] = dict( type="stream", op="occupy", streams=[stream.to_dict() for stream in new_occupied_streams], ) send_event(realm, event, active_user_ids(realm.id)) # Notify all existing users on streams that users have joined # First, get all users subscribed to the streams that we care about # We fetch all subscription information upfront, as it's used throughout # the following code and we want to minize DB queries all_subscribers_by_stream = get_user_ids_for_streams(streams=streams) def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]: if stream.is_in_zephyr_realm and not stream.invite_only: return [] user_ids = all_subscribers_by_stream[stream.id] return user_ids sub_tuples_by_user: Dict[int, List[Tuple[Subscription, Stream]]] = defaultdict(list) new_streams: Set[Tuple[int, int]] = set() for (sub, stream) in subs_to_add + subs_to_activate: sub_tuples_by_user[sub.user_profile.id].append((sub, stream)) new_streams.add((sub.user_profile.id, stream.id)) # We now send several types of events to notify browsers. The # first batch is notifications to users on invite-only streams # that the stream exists. for stream in streams: if not stream.is_public(): # Users newly added to invite-only streams # need a `create` notification. The former, because # they need the stream to exist before # they get the "subscribe" notification, and the latter so # they can manage the new stream. # Realm admins already have all created private streams. realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()] new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and user.id not in realm_admin_ids] send_stream_creation_event(stream, new_users_ids) stream_ids = {stream.id for stream in streams} recent_traffic = get_streams_traffic(stream_ids=stream_ids) # The second batch is events for the users themselves that they # were subscribed to the new streams. 
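    # Each such user gets a single "subscription"/"add" event whose
    # subscriptions payload covers all of their newly added streams (see
    # notify_subscriptions_added above).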
    for user_profile in users:
        if len(sub_tuples_by_user[user_profile.id]) == 0:
            continue
        sub_pairs = sub_tuples_by_user[user_profile.id]
        notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
                                   recent_traffic)

    # The third batch is events for other users who are tracking the
    # subscribers lists of streams in their browser; everyone for
    # public streams and only existing subscribers for private streams.
    for stream in streams:
        if stream.is_in_zephyr_realm and not stream.invite_only:
            continue

        new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
        subscribed_user_ids = all_subscribers_by_stream[stream.id]

        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_user_ids=new_user_ids,
            subscribed_user_ids=subscribed_user_ids,
        )

        if peer_user_ids:
            for new_user_id in new_user_ids:
                event = dict(type="subscription", op="peer_add",
                             stream_id=stream.id,
                             user_id=new_user_id)
                send_event(realm, event, peer_user_ids)

    return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
            [(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
            already_subscribed)

def get_available_notification_sounds() -> List[str]:
    notification_sounds_path = static_path('audio/notification_sounds')
    available_notification_sounds = []

    for file_name in os.listdir(notification_sounds_path):
        root, ext = os.path.splitext(file_name)
        if '.' in root:  # nocoverage
            # Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
            # to avoid spurious duplicates.
            continue
        if ext == '.ogg':
            available_notification_sounds.append(root)

    return available_notification_sounds

def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
                                 no_log: bool=False) -> None:
    if not no_log:
        log_event({'type': 'subscription_removed',
                   'user': user_profile.email,
                   'names': [stream.name for stream in streams],
                   'realm': user_profile.realm.string_id})

    payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
    event = dict(type="subscription", op="remove",
                 subscriptions=payload)
    send_event(user_profile.realm, event, [user_profile.id])

SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
                              streams: Iterable[Stream],
                              acting_client: Client,
                              acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:

    users = list(users)
    streams = list(streams)

    stream_dict = {stream.id: stream for stream in streams}

    existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)

    def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
        stream_ids = {stream.id for stream in streams}

        not_subscribed: List[Tuple[UserProfile, Stream]] = []

        for user_profile in users:
            user_sub_stream_info = existing_subs_by_user[user_profile.id]

            subscribed_stream_ids = {
                stream.id
                for (sub, stream) in user_sub_stream_info
            }
            not_subscribed_stream_ids = stream_ids - subscribed_stream_ids

            for stream_id in not_subscribed_stream_ids:
                stream = stream_dict[stream_id]
                not_subscribed.append((user_profile, stream))

        return not_subscribed

    not_subscribed = get_non_subscribed_tups()

    subs_to_deactivate: List[Tuple[Subscription, Stream]] = []
    sub_ids_to_deactivate: List[int] = []

    # This loop just flattens out our data into big lists for
    # bulk operations.
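    # e.g. (illustrative) {1: [(subA, s1)], 2: [(subB, s1), (subC, s2)]}
    # flattens to subs_to_deactivate = [(subA, s1), (subB, s1), (subC, s2)]
    # and sub_ids_to_deactivate = [subA.id, subB.id, subC.id].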
for tup_list in existing_subs_by_user.values(): for (sub, stream) in tup_list: subs_to_deactivate.append((sub, stream)) sub_ids_to_deactivate.append(sub.id) our_realm = users[0].realm # TODO: XXX: This transaction really needs to be done at the serializeable # transaction isolation level. with transaction.atomic(): occupied_streams_before = list(get_occupied_streams(our_realm)) Subscription.objects.filter( id__in=sub_ids_to_deactivate, ) .update(active=False) occupied_streams_after = list(get_occupied_streams(our_realm)) # Log Subscription Activities in RealmAuditLog event_time = timezone_now() event_last_message_id = get_last_message_id() all_subscription_logs: (List[RealmAuditLog]) = [] for (sub, stream) in subs_to_deactivate: all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED, event_time=event_time)) # Now since we have all log objects generated we can do a bulk insert RealmAuditLog.objects.bulk_create(all_subscription_logs) altered_user_dict: Dict[int, List[UserProfile]] = defaultdict(list) streams_by_user: Dict[int, List[Stream]] = defaultdict(list) for (sub, stream) in subs_to_deactivate: streams_by_user[sub.user_profile_id].append(stream) altered_user_dict[stream.id].append(sub.user_profile) for user_profile in users: if len(streams_by_user[user_profile.id]) == 0: continue notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id]) event = {'type': 'mark_stream_messages_as_read', 'client_id': acting_client.id, 'user_profile_id': user_profile.id, 'stream_ids': [stream.id for stream in streams]} queue_json_publish("deferred_work", event) all_subscribers_by_stream = get_user_ids_for_streams(streams=streams) def send_peer_remove_event(stream: Stream) -> None: if stream.is_in_zephyr_realm and not stream.invite_only: return altered_users = altered_user_dict[stream.id] altered_user_ids = [u.id for u in altered_users] subscribed_user_ids = all_subscribers_by_stream[stream.id] peer_user_ids = get_peer_user_ids_for_stream_change( stream=stream, altered_user_ids=altered_user_ids, subscribed_user_ids=subscribed_user_ids, ) if peer_user_ids: for removed_user in altered_users: event = dict(type="subscription", op="peer_remove", stream_id=stream.id, user_id=removed_user.id) send_event(our_realm, event, peer_user_ids) for stream in streams: send_peer_remove_event(stream=stream) new_vacant_streams = [stream for stream in set(occupied_streams_before) - set(occupied_streams_after)] new_vacant_private_streams = [stream for stream in new_vacant_streams if stream.invite_only] new_vacant_public_streams = [stream for stream in new_vacant_streams if not stream.invite_only] if new_vacant_public_streams: event = dict(type="stream", op="vacate", streams=[stream.to_dict() for stream in new_vacant_public_streams]) send_event(our_realm, event, active_user_ids(our_realm.id)) if new_vacant_private_streams: # Deactivate any newly-vacant private streams for stream in new_vacant_private_streams: do_deactivate_stream(stream, acting_user=acting_user) return ( [(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate], not_subscribed, ) def log_subscription_property_change(user_email: str, stream_name: str, property: str, value: Any) -> None: event = {'type': 'subscription_property', 'property': property, 'user': user_email, 'stream_name': stream_name, 'value': value} log_event(event) def 
do_change_subscription_property(user_profile: UserProfile, sub: Subscription, stream: Stream, property_name: str, value: Any, ) -> None: database_property_name = property_name event_property_name = property_name database_value = value event_value = value # For this property, is_muted is used in the database, but # in_home_view in the API, since we haven't migrated the events # API to the new name yet. if property_name == "in_home_view": database_property_name = "is_muted" database_value = not value if property_name == "is_muted": event_property_name = "in_home_view" event_value = not value setattr(sub, database_property_name, database_value) sub.save(update_fields=[database_property_name]) log_subscription_property_change(user_profile.email, stream.name, database_property_name, database_value) event = dict(type="subscription", op="update", email=user_profile.email, property=event_property_name, value=event_value, stream_id=stream.id, name=stream.name) send_event(user_profile.realm, event, [user_profile.id]) def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None: user_profile.set_password(password) if commit: user_profile.save(update_fields=["password"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED, event_time=event_time) def do_change_full_name(user_profile: UserProfile, full_name: str, acting_user: Optional[UserProfile]) -> None: old_name = user_profile.full_name user_profile.full_name = full_name user_profile.save(update_fields=["full_name"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED, event_time=event_time, extra_data=old_name) payload = dict(user_id=user_profile.id, full_name=user_profile.full_name) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=payload), bot_owner_user_ids(user_profile)) def check_change_full_name(user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile) -> str: """Verifies that the user's proposed full name is valid. The caller is responsible for checking check permissions. Returns the new full name, which may differ from what was passed in (because this function strips whitespace).""" new_full_name = check_full_name(full_name_raw) do_change_full_name(user_profile, new_full_name, acting_user) return new_full_name def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile) -> None: new_full_name = check_full_name(full_name_raw) if new_full_name == user_profile.full_name: # Our web app will try to patch full_name even if the user didn't # modify the name in the form. We just silently ignore those # situations. return check_bot_name_available( realm_id=user_profile.realm_id, full_name=new_full_name, ) do_change_full_name(user_profile, new_full_name, acting_user) def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile, acting_user: UserProfile) -> None: previous_owner = user_profile.bot_owner user_profile.bot_owner = bot_owner user_profile.save() # Can't use update_fields because of how the foreign key works. 
event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED, event_time=event_time) update_users = bot_owner_user_ids(user_profile) # For admins, update event is sent instead of delete/add # event. bot_data of admin contains all the # bots and none of them should be removed/(added again). # Delete the bot from previous owner's bot data. if previous_owner and not previous_owner.is_realm_admin: send_event(user_profile.realm, dict(type='realm_bot', op="delete", bot=dict( user_id=user_profile.id, )), {previous_owner.id}) # Do not send update event for previous bot owner. update_users = update_users - {previous_owner.id} # Notify the new owner that the bot has been added. if not bot_owner.is_realm_admin: add_event = created_bot_event(user_profile) send_event(user_profile.realm, add_event, {bot_owner.id}) # Do not send update event for bot_owner. update_users = update_users - {bot_owner.id} send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, owner_id=user_profile.bot_owner.id, )), update_users) # Since `bot_owner_id` is included in the user profile dict we need # to update the users dict with the new bot owner id event: Dict[str, Any] = dict( type="realm_user", op="update", person=dict( user_id=user_profile.id, bot_owner_id=user_profile.bot_owner.id, ), ) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None: user_profile.tos_version = tos_version user_profile.save(update_fields=["tos_version"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED, event_time=event_time) def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str: old_api_key = user_profile.api_key new_api_key = generate_api_key() user_profile.api_key = new_api_key user_profile.save(update_fields=["api_key"]) # We need to explicitly delete the old API key from our caches, # because the on-save handler for flushing the UserProfile object # in zerver/lib/cache.py only has access to the new API key. cache_delete(user_profile_by_api_key_cache_key(old_api_key)) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED, event_time=event_time) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, api_key=new_api_key, )), bot_owner_user_ids(user_profile)) event = {'type': 'clear_push_device_tokens', 'user_profile_id': user_profile.id} queue_json_publish("deferred_work", event) return new_api_key def notify_avatar_url_change(user_profile: UserProfile) -> None: if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, avatar_url=avatar_url(user_profile), )), bot_owner_user_ids(user_profile)) payload = dict( avatar_source=user_profile.avatar_source, avatar_url=avatar_url(user_profile), avatar_url_medium=avatar_url(user_profile, medium=True), avatar_version=user_profile.avatar_version, # Even clients using client_gravatar don't need the email, # since we're sending the URL anyway. 
user_id=user_profile.id, ) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str, skip_notify: bool=False, acting_user: Optional[UserProfile]=None) -> None: user_profile.avatar_source = avatar_source user_profile.avatar_version += 1 user_profile.save(update_fields=["avatar_source", "avatar_version"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile, event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED, extra_data={'avatar_source': avatar_source}, event_time=event_time, acting_user=acting_user) if not skip_notify: notify_avatar_url_change(user_profile) def do_delete_avatar_image(user: UserProfile, acting_user: Optional[UserProfile]=None) -> None: do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user) delete_avatar_image(user) def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None: realm.icon_source = icon_source realm.icon_version += 1 realm.save(update_fields=["icon_source", "icon_version"]) if log: log_event({'type': 'realm_change_icon', 'realm': realm.string_id, 'icon_source': icon_source}) send_event(realm, dict(type='realm', op='update_dict', property="icon", data=dict(icon_source=realm.icon_source, icon_url=realm_icon_url(realm))), active_user_ids(realm.id)) def do_change_logo_source(realm: Realm, logo_source: str, night: bool, acting_user: Optional[UserProfile]=None) -> None: if not night: realm.logo_source = logo_source realm.logo_version += 1 realm.save(update_fields=["logo_source", "logo_version"]) else: realm.night_logo_source = logo_source realm.night_logo_version += 1 realm.save(update_fields=["night_logo_source", "night_logo_version"]) RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED, realm=realm, event_time=timezone_now(), acting_user=acting_user) event = dict(type='realm', op='update_dict', property="night_logo" if night else "logo", data=get_realm_logo_data(realm, night)) send_event(realm, event, active_user_ids(realm.id)) def do_change_plan_type(realm: Realm, plan_type: int) -> None: old_value = realm.plan_type realm.plan_type = plan_type realm.save(update_fields=['plan_type']) RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED, realm=realm, event_time=timezone_now(), extra_data={'old_value': old_value, 'new_value': plan_type}) if plan_type == Realm.STANDARD: realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX realm.message_visibility_limit = None realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD elif plan_type == Realm.SELF_HOSTED: realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter. 
realm.message_visibility_limit = None realm.upload_quota_gb = None elif plan_type == Realm.STANDARD_FREE: realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX realm.message_visibility_limit = None realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD elif plan_type == Realm.LIMITED: realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED else: raise AssertionError("Invalid plan type") update_first_visible_message_id(realm) realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb']) event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type, 'extra_data': {'upload_quota': realm.upload_quota_bytes()}} send_event(realm, event, active_user_ids(realm.id)) def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream], log: bool=True) -> None: user_profile.default_sending_stream = stream user_profile.save(update_fields=['default_sending_stream']) if log: log_event({'type': 'user_change_default_sending_stream', 'user': user_profile.email, 'stream': str(stream)}) if user_profile.is_bot: if stream: stream_name: Optional[str] = stream.name else: stream_name = None send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_sending_stream=stream_name, )), bot_owner_user_ids(user_profile)) def do_change_default_events_register_stream(user_profile: UserProfile, stream: Optional[Stream], log: bool=True) -> None: user_profile.default_events_register_stream = stream user_profile.save(update_fields=['default_events_register_stream']) if log: log_event({'type': 'user_change_default_events_register_stream', 'user': user_profile.email, 'stream': str(stream)}) if user_profile.is_bot: if stream: stream_name: Optional[str] = stream.name else: stream_name = None send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_events_register_stream=stream_name, )), bot_owner_user_ids(user_profile)) def do_change_default_all_public_streams(user_profile: UserProfile, value: bool, log: bool=True) -> None: user_profile.default_all_public_streams = value user_profile.save(update_fields=['default_all_public_streams']) if log: log_event({'type': 'user_change_default_all_public_streams', 'user': user_profile.email, 'value': str(value)}) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_all_public_streams=user_profile.default_all_public_streams, )), bot_owner_user_ids(user_profile)) def do_change_user_role(user_profile: UserProfile, value: int, acting_user: Optional[UserProfile]=None) -> None: old_value = user_profile.role user_profile.role = value user_profile.save(update_fields=["role"]) RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(), extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: old_value, RealmAuditLog.NEW_VALUE: value, RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) event = dict(type="realm_user", op="update", person=dict(user_id=user_profile.id, role=user_profile.role)) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def do_change_is_api_super_user(user_profile: UserProfile, value: bool) -> None: user_profile.is_api_super_user = value 
user_profile.save(update_fields=["is_api_super_user"]) def do_change_stream_invite_only(stream: Stream, invite_only: bool, history_public_to_subscribers: Optional[bool]=None) -> None: history_public_to_subscribers = get_default_value_for_history_public_to_subscribers( stream.realm, invite_only, history_public_to_subscribers, ) stream.invite_only = invite_only stream.history_public_to_subscribers = history_public_to_subscribers stream.save(update_fields=['invite_only', 'history_public_to_subscribers']) event = dict( op="update", type="stream", property="invite_only", value=invite_only, history_public_to_subscribers=history_public_to_subscribers, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None: stream.is_web_public = is_web_public stream.save(update_fields=['is_web_public']) def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None: stream.stream_post_policy = stream_post_policy stream.save(update_fields=['stream_post_policy']) event = dict( op="update", type="stream", property="stream_post_policy", value=stream_post_policy, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) # Backwards-compatibility code: We removed the # is_announcement_only property in early 2020, but we send a # duplicate event for legacy mobile clients that might want the # data. event = dict( op="update", type="stream", property="is_announcement_only", value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_rename_stream(stream: Stream, new_name: str, user_profile: UserProfile, log: bool=True) -> Dict[str, str]: old_name = stream.name stream.name = new_name stream.save(update_fields=["name"]) if log: log_event({'type': 'stream_name_change', 'realm': stream.realm.string_id, 'new_name': new_name}) recipient_id = stream.recipient_id messages = Message.objects.filter(recipient_id=recipient_id).only("id") # Update the display recipient and stream, which are easy single # items to set. old_cache_key = get_stream_cache_key(old_name, stream.realm_id) new_cache_key = get_stream_cache_key(stream.name, stream.realm_id) if old_cache_key != new_cache_key: cache_delete(old_cache_key) cache_set(new_cache_key, stream) cache_set(display_recipient_cache_key(recipient_id), stream.name) # Delete cache entries for everything else, which is cheaper and # clearer than trying to set them. display_recipient is the out of # date field in all cases. cache_delete_many( to_dict_cache_key_id(message.id) for message in messages) new_email = encode_email_address(stream, show_sender=True) # We will tell our users to essentially # update stream.name = new_name where name = old_name # and update stream.email = new_email where name = old_name. # We could optimize this by trying to send one message, but the # client code really wants one property update at a time, and # updating stream names is a pretty infrequent operation. # More importantly, we want to key these updates by id, not name, # since id is the immutable primary key, and obviously name is not. 
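# Illustrative sketch (stream name and id are made up): renaming "denmark" (stream_id 7) to "denmark2" produces two separate per-property events of roughly this shape, keyed by the immutable stream_id and carrying the old name for clients that still look streams up by name:
#   {'op': 'update', 'type': 'stream', 'property': 'email_address', 'value': '<new encoded address>', 'stream_id': 7, 'name': 'denmark'}
#   {'op': 'update', 'type': 'stream', 'property': 'name', 'value': 'denmark2', 'stream_id': 7, 'name': 'denmark'}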
data_updates = [ ['email_address', new_email], ['name', new_name], ] for property, value in data_updates: event = dict( op="update", type="stream", property=property, value=value, stream_id=stream.id, name=old_name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) sender = get_system_bot(settings.NOTIFICATION_BOT) with override_language(stream.realm.default_language): internal_send_stream_message( stream.realm, sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, _('{user_name} renamed stream {old_stream_name} to {new_stream_name}.').format( user_name=f"@_**{user_profile.full_name}|{user_profile.id}**", old_stream_name=f"**{old_name}**", new_stream_name=f"**{new_name}**", ), ) # Even though the token doesn't change, the web client needs to update the # email forwarding address to display the correctly-escaped new name. return {"email_address": new_email} def do_change_stream_description(stream: Stream, new_description: str) -> None: stream.description = new_description stream.rendered_description = render_stream_description(new_description) stream.save(update_fields=['description', 'rendered_description']) event = dict( type='stream', op='update', property='description', name=stream.name, stream_id=stream.id, value=new_description, rendered_description=stream.rendered_description, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_change_stream_message_retention_days(stream: Stream, message_retention_days: Optional[int]=None) -> None: stream.message_retention_days = message_retention_days stream.save(update_fields=['message_retention_days']) event = dict( op="update", type="stream", property="message_retention_days", value=message_retention_days, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_create_realm(string_id: str, name: str, emails_restricted_to_domains: Optional[bool]=None) -> Realm: if Realm.objects.filter(string_id=string_id).exists(): raise AssertionError(f"Realm {string_id} already exists!") if not server_initialized(): logging.info("Server not yet initialized. Creating the internal realm first.") create_internal_realm() kwargs: Dict[str, Any] = {} if emails_restricted_to_domains is not None: kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains realm = Realm(string_id=string_id, name=name, **kwargs) realm.save() # Create stream once Realm object has been saved notifications_stream = ensure_stream( realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME, stream_description="Everyone is added to this stream by default. Welcome! :octopus:", acting_user=None) realm.notifications_stream = notifications_stream # With the current initial streams situation, the only public # stream is the notifications_stream. 
DefaultStream.objects.create(stream=notifications_stream, realm=realm) signup_notifications_stream = ensure_stream( realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True, stream_description="A private stream for core team members.", acting_user=None) realm.signup_notifications_stream = signup_notifications_stream realm.save(update_fields=['notifications_stream', 'signup_notifications_stream']) if settings.BILLING_ENABLED: do_change_plan_type(realm, Realm.LIMITED) # Log the event log_event({"type": "realm_created", "string_id": string_id, "emails_restricted_to_domains": emails_restricted_to_domains}) sender = get_system_bot(settings.NOTIFICATION_BOT) admin_realm = sender.realm # Send a notification to the admin realm with override_language(admin_realm.default_language): signup_message = _("Signups enabled") try: signups_stream = get_signups_stream(admin_realm) topic = realm.display_subdomain internal_send_stream_message( admin_realm, sender, signups_stream, topic, signup_message, ) except Stream.DoesNotExist: # nocoverage # If the signups stream hasn't been created in the admin # realm, don't auto-create it to send to it; just do nothing. pass return realm def do_change_notification_settings(user_profile: UserProfile, name: str, value: Union[bool, int, str], log: bool=True) -> None: """Takes in a UserProfile object, the name of a global notification preference to update, and the value to update to """ notification_setting_type = UserProfile.notification_setting_types[name] assert isinstance(value, notification_setting_type), ( f'Cannot update {name}: {value} is not an instance of {notification_setting_type}') setattr(user_profile, name, value) # Disabling digest emails should clear a user's email queue if name == 'enable_digest_emails' and not value: clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST) user_profile.save(update_fields=[name]) event = {'type': 'update_global_notifications', 'user': user_profile.email, 'notification_name': name, 'setting': value} if log: log_event(event) send_event(user_profile.realm, event, [user_profile.id]) def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None: user_profile.enter_sends = enter_sends user_profile.save(update_fields=["enter_sends"]) def do_set_user_display_setting(user_profile: UserProfile, setting_name: str, setting_value: Union[bool, str, int]) -> None: property_type = UserProfile.property_types[setting_name] assert isinstance(setting_value, property_type) setattr(user_profile, setting_name, setting_value) user_profile.save(update_fields=[setting_name]) event = {'type': 'update_display_settings', 'user': user_profile.email, 'setting_name': setting_name, 'setting': setting_value} if setting_name == "default_language": assert isinstance(setting_value, str) event['language_name'] = get_language_name(setting_value) send_event(user_profile.realm, event, [user_profile.id]) # Updates to the timezone display setting are sent to all users if setting_name == "timezone": payload = dict(email=user_profile.email, user_id=user_profile.id, timezone=user_profile.timezone) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def lookup_default_stream_groups(default_stream_group_names: List[str], realm: Realm) -> List[DefaultStreamGroup]: default_stream_groups = [] for group_name in default_stream_group_names: try: default_stream_group = DefaultStreamGroup.objects.get( name=group_name, realm=realm) except DefaultStreamGroup.DoesNotExist: raise 
JsonableError(_('Invalid default stream group {}').format(group_name)) default_stream_groups.append(default_stream_group) return default_stream_groups def notify_default_streams(realm: Realm) -> None: event = dict( type="default_streams", default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)), ) send_event(realm, event, active_non_guest_user_ids(realm.id)) def notify_default_stream_groups(realm: Realm) -> None: event = dict( type="default_stream_groups", default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm)), ) send_event(realm, event, active_non_guest_user_ids(realm.id)) def do_add_default_stream(stream: Stream) -> None: realm_id = stream.realm_id stream_id = stream.id if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists(): DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id) notify_default_streams(stream.realm) def do_remove_default_stream(stream: Stream) -> None: realm_id = stream.realm_id stream_id = stream.id DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete() notify_default_streams(stream.realm) def do_create_default_stream_group(realm: Realm, group_name: str, description: str, streams: List[Stream]) -> None: default_streams = get_default_streams_for_realm(realm.id) for stream in streams: if stream in default_streams: raise JsonableError(_( "'{stream_name}' is a default stream and cannot be added to '{group_name}'", ).format(stream_name=stream.name, group_name=group_name)) check_default_stream_group_name(group_name) (group, created) = DefaultStreamGroup.objects.get_or_create( name=group_name, realm=realm, description=description) if not created: raise JsonableError(_( "Default stream group '{group_name}' already exists", ).format(group_name=group_name)) group.streams.set(streams) notify_default_stream_groups(realm) def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup, streams: List[Stream]) -> None: default_streams = get_default_streams_for_realm(realm.id) for stream in streams: if stream in default_streams: raise JsonableError(_( "'{stream_name}' is a default stream and cannot be added to '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) if stream in group.streams.all(): raise JsonableError(_( "Stream '{stream_name}' is already present in default stream group '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) group.streams.add(stream) group.save() notify_default_stream_groups(realm) def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup, streams: List[Stream]) -> None: for stream in streams: if stream not in group.streams.all(): raise JsonableError(_( "Stream '{stream_name}' is not present in default stream group '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) group.streams.remove(stream) group.save() notify_default_stream_groups(realm) def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup, new_group_name: str) -> None: if group.name == new_group_name: raise JsonableError(_("This default stream group is already named '{}'").format(new_group_name)) if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists(): raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name)) group.name = new_group_name group.save() notify_default_stream_groups(realm) def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup, 
new_description: str) -> None: group.description = new_description group.save() notify_default_stream_groups(realm) def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None: group.delete() notify_default_stream_groups(realm) def get_default_streams_for_realm(realm_id: int) -> List[Stream]: return [default.stream for default in DefaultStream.objects.select_related().filter(realm_id=realm_id)] def get_default_subs(user_profile: UserProfile) -> List[Stream]: # Right now default streams are realm-wide. This wrapper gives us flexibility # to some day further customize how we set up default streams for new users. return get_default_streams_for_realm(user_profile.realm_id) # returns default streams in json serializeable format def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]: return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"]) def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]: return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"]) def do_update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None: effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH # This code isn't perfect, because with various races we might end # up creating two overlapping intervals, but that shouldn't happen # often, and can be corrected for in post-processing try: last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0] # There are two ways our intervals could overlap: # (1) The start of the new interval could be inside the old interval # (2) The end of the new interval could be inside the old interval # In either case, we just extend the old interval to include the new interval. if ((log_time <= last.end and log_time >= last.start) or (effective_end <= last.end and effective_end >= last.start)): last.end = max(last.end, effective_end) last.start = min(last.start, log_time) last.save(update_fields=["start", "end"]) return except IndexError: pass # Otherwise, the intervals don't overlap, so we should make a new one UserActivityInterval.objects.create(user_profile=user_profile, start=log_time, end=effective_end) @statsd_increment('user_activity') def do_update_user_activity(user_profile_id: int, client_id: int, query: str, count: int, log_time: datetime.datetime) -> None: (activity, created) = UserActivity.objects.get_or_create( user_profile_id = user_profile_id, client_id = client_id, query = query, defaults={'last_visit': log_time, 'count': count}) if not created: activity.count += count activity.last_visit = log_time activity.save(update_fields=["last_visit", "count"]) def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None: presence_dict = presence.to_dict() event = dict(type="presence", email=user_profile.email, user_id=user_profile.id, server_timestamp=time.time(), presence={presence_dict['client']: presence_dict}) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def consolidate_client(client: Client) -> Client: # The web app reports a client as 'website' # The desktop app reports a client as ZulipDesktop # due to it setting a custom user agent. 
We want both # to count as web users # Alias ZulipDesktop to website if client.name in ['ZulipDesktop']: return get_client('website') else: return client @statsd_increment('user_presence') def do_update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int) -> None: client = consolidate_client(client) defaults = dict( timestamp=log_time, status=status, realm_id=user_profile.realm_id, ) (presence, created) = UserPresence.objects.get_or_create( user_profile = user_profile, client = client, defaults = defaults, ) stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10) was_idle = presence.status == UserPresence.IDLE became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle) # If an object was created, it has already been saved. # # We suppress changes from ACTIVE to IDLE before stale_status is reached; # this protects us from the user having two clients open: one active, the # other idle. Without this check, we would constantly toggle their status # between the two states. if not created and stale_status or was_idle or status == presence.status: # The following block attempts to only update the "status" # field in the event that it actually changed. This is # important to avoid flushing the UserPresence cache when the # data it would return to a client hasn't actually changed # (see the UserPresence post_save hook for details). presence.timestamp = log_time update_fields = ["timestamp"] if presence.status != status: presence.status = status update_fields.append("status") presence.save(update_fields=update_fields) if not user_profile.realm.presence_disabled and (created or became_online): # Push event to all users in the realm so they see the new user # appear in the presence list immediately, or the newly online # user without delay. Note that we won't send an update here for a # timestamp update, because we rely on the browser to ping us every 50 # seconds for realm-wide status updates, and those updates should have # recent timestamps, which means the browser won't think active users # have gone idle. If we were more aggressive in this function about # sending timestamp updates, we could eliminate the ping responses, but # that's not a high priority for now, considering that most of our non-MIT # realms are pretty small. 
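# Worked example (timestamps are hypothetical): the staleness window above is datetime.timedelta(minutes=1, seconds=10), i.e. 70 seconds. A ping at 12:00:00 against a stored presence.timestamp of 11:58:40 is an 80-second gap, so stale_status is True; if that ping reports ACTIVE, became_online is True and the realm-wide presence event below is sent.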
send_presence_changed(user_profile, presence) def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None: event = {'user_profile_id': user_profile.id, 'time': datetime_to_timestamp(log_time)} queue_json_publish("user_activity_interval", event) def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int, new_user_input: bool) -> None: event = {'user_profile_id': user_profile.id, 'status': status, 'time': datetime_to_timestamp(log_time), 'client': client.name} queue_json_publish("user_presence", event) if new_user_input: update_user_activity_interval(user_profile, log_time) def do_update_user_status(user_profile: UserProfile, away: Optional[bool], status_text: Optional[str], client_id: int) -> None: if away: status = UserStatus.AWAY else: status = UserStatus.NORMAL realm = user_profile.realm update_user_status( user_profile_id=user_profile.id, status=status, status_text=status_text, client_id=client_id, ) event = dict( type='user_status', user_id=user_profile.id, ) if away is not None: event['away'] = away if status_text is not None: event['status_text'] = status_text send_event(realm, event, active_user_ids(realm.id)) def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int: log_statsd_event('bankruptcy') # First, we clear mobile push notifications. This is safer in the # event that the below logic times out and we're killed. all_push_message_ids = UserMessage.objects.filter( user_profile=user_profile, ).extra( where=[UserMessage.where_active_push_notification()], ).values_list("message_id", flat=True)[0:10000] do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids) msgs = UserMessage.objects.filter( user_profile=user_profile, ).extra( where=[UserMessage.where_unread()], ) count = msgs.update( flags=F('flags').bitor(UserMessage.flags.read), ) event = dict( type='update_message_flags', operation='add', flag='read', messages=[], # we don't send messages, since the client reloads anyway all=True, ) event_time = timezone_now() send_event(user_profile.realm, event, [user_profile.id]) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count def do_mark_stream_messages_as_read(user_profile: UserProfile, client: Client, stream: Stream, topic_name: Optional[str]=None) -> int: log_statsd_event('mark_stream_as_read') msgs = UserMessage.objects.filter( user_profile=user_profile, ) recipient = stream.recipient msgs = msgs.filter(message__recipient=recipient) if topic_name: msgs = filter_by_topic_name_via_message( query=msgs, topic_name=topic_name, ) msgs = msgs.extra( where=[UserMessage.where_unread()], ) message_ids = list(msgs.values_list('message__id', flat=True)) count = msgs.update( flags=F('flags').bitor(UserMessage.flags.read), ) event = dict( type='update_message_flags', operation='add', flag='read', messages=message_ids, all=False, ) event_time = timezone_now() send_event(user_profile.realm, event, [user_profile.id]) do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count def 
do_update_mobile_push_notification(message: Message, prior_mention_user_ids: Set[int], stream_push_user_ids: Set[int]) -> None: # Called during the message edit code path to remove mobile push # notifications for users who are no longer mentioned following # the edit. See #15428 for details. # # A perfect implementation would also support updating the message # in a sent notification if a message was edited to mention a # group rather than a user (or vise versa), though it is likely # not worth the effort to do such a change. if not message.is_stream_message(): return remove_notify_users = prior_mention_user_ids - message.mentions_user_ids - stream_push_user_ids do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id]) def do_clear_mobile_push_notifications_for_ids(user_profile_ids: List[int], message_ids: List[int]) -> None: if len(message_ids) == 0: return # This function supports clearing notifications for several users # only for the message-edit use case where we'll have a single message_id. assert len(user_profile_ids) == 1 or len(message_ids) == 1 messages_by_user = defaultdict(list) notifications_to_update = list(UserMessage.objects.filter( message_id__in=message_ids, user_profile_id__in=user_profile_ids, ).extra( where=[UserMessage.where_active_push_notification()], ).values_list('user_profile_id', 'message_id')) for (user_id, message_id) in notifications_to_update: messages_by_user[user_id].append(message_id) for (user_profile_id, event_message_ids) in messages_by_user.items(): queue_json_publish("missedmessage_mobile_notifications", { "type": "remove", "user_profile_id": user_profile_id, "message_ids": event_message_ids, }) def do_update_message_flags(user_profile: UserProfile, client: Client, operation: str, flag: str, messages: List[int]) -> int: valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS] if flag not in valid_flags: raise JsonableError(_("Invalid flag: '{}'").format(flag)) if flag in UserMessage.NON_EDITABLE_FLAGS: raise JsonableError(_("Flag not editable: '{}'").format(flag)) flagattr = getattr(UserMessage.flags, flag) msgs = UserMessage.objects.filter(user_profile=user_profile, message__id__in=messages) # This next block allows you to star any message, even those you # didn't receive (e.g. because you're looking at a public stream # you're not subscribed to, etc.). The problem is that starring # is a flag boolean on UserMessage, and UserMessage rows are # normally created only when you receive a message to support # searching your personal history. So we need to create one. We # add UserMessage.flags.historical, so that features that need # "messages you actually received" can exclude these UserMessages. if msgs.count() == 0: if not len(messages) == 1: raise JsonableError(_("Invalid message(s)")) if flag != "starred": raise JsonableError(_("Invalid message(s)")) # Validate that the user could have read the relevant message message = access_message(user_profile, messages[0])[0] # OK, this is a message that you legitimately have access # to via narrowing to the stream it is on, even though you # didn't actually receive it. So we create a historical, # read UserMessage message row for you to star. 
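# For illustration (this lookup is hypothetical and not part of the flow below): the row created next starts out with flags = historical | read, so roughly
#   um = UserMessage.objects.get(user_profile=user_profile, message=message)
#   um.flags_list()  # something like ['read', 'historical'] before the star is added
# which lets queries for "messages you actually received" exclude it.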
UserMessage.objects.create(user_profile=user_profile, message=message, flags=UserMessage.flags.historical | UserMessage.flags.read) if operation == 'add': count = msgs.update(flags=F('flags').bitor(flagattr)) elif operation == 'remove': count = msgs.update(flags=F('flags').bitand(~flagattr)) else: raise AssertionError("Invalid message flags operation") event = {'type': 'update_message_flags', 'operation': operation, 'flag': flag, 'messages': messages, 'all': False} send_event(user_profile.realm, event, [user_profile.id]) if flag == "read" and operation == "add": event_time = timezone_now() do_clear_mobile_push_notifications_for_ids([user_profile.id], messages) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count class MessageUpdateUserInfoResult(TypedDict): message_user_ids: Set[int] mention_user_ids: Set[int] def notify_topic_moved_streams(user_profile: UserProfile, old_stream: Stream, old_topic: str, new_stream: Stream, new_topic: Optional[str], send_notification_to_old_thread: bool, send_notification_to_new_thread: bool) -> None: # Since moving content between streams is highly disruptive, # it's worth adding a couple tombstone messages showing what # happened. sender = get_system_bot(settings.NOTIFICATION_BOT) if new_topic is None: new_topic = old_topic user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**" old_topic_link = f"#**{old_stream.name}>{old_topic}**" new_topic_link = f"#**{new_stream.name}>{new_topic}**" if send_notification_to_new_thread: with override_language(new_stream.realm.default_language): internal_send_stream_message( new_stream.realm, sender, new_stream, new_topic, _("This topic was moved here from {old_location} by {user}").format( old_location=old_topic_link, user=user_mention, ), ) if send_notification_to_old_thread: with override_language(old_stream.realm.default_language): # Send a notification to the old stream that the topic was moved. internal_send_stream_message( old_stream.realm, sender, old_stream, old_topic, _("This topic was moved by {user} to {new_location}").format( user=user_mention, new_location=new_topic_link, ), ) def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult: # We exclude UserMessage.flags.historical rows since those # users did not receive the message originally, and thus # probably are not relevant for reprocessed alert_words, # mentions and similar rendering features. This may be a # decision we change in the future. 
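# Rough shape of the result, for illustration (ids are made up), matching the MessageUpdateUserInfoResult TypedDict above:
#   {'message_user_ids': {10, 11, 12},   # users with a non-historical UserMessage row
#    'mention_user_ids': {11}}           # the subset whose flags include a mention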
query = UserMessage.objects.filter( message=message_id, flags=~UserMessage.flags.historical, ).values('user_profile_id', 'flags') rows = list(query) message_user_ids = { row['user_profile_id'] for row in rows } mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned mention_user_ids = { row['user_profile_id'] for row in rows if int(row['flags']) & mask } return dict( message_user_ids=message_user_ids, mention_user_ids=mention_user_ids, ) def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None: wildcard = message.mentions_wildcard mentioned_ids = message.mentions_user_ids ids_with_alert_words = message.user_ids_with_alert_words changed_ums: Set[UserMessage] = set() def update_flag(um: UserMessage, should_set: bool, flag: int) -> None: if should_set: if not (um.flags & flag): um.flags |= flag changed_ums.add(um) else: if (um.flags & flag): um.flags &= ~flag changed_ums.add(um) for um in ums: has_alert_word = um.user_profile_id in ids_with_alert_words update_flag(um, has_alert_word, UserMessage.flags.has_alert_word) mentioned = um.user_profile_id in mentioned_ids update_flag(um, mentioned, UserMessage.flags.mentioned) update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned) for um in changed_ums: um.save(update_fields=['flags']) def update_to_dict_cache(changed_messages: List[Message], realm_id: Optional[int]=None) -> List[int]: """Updates the message as stored in the to_dict cache (for serving messages).""" items_for_remote_cache = {} message_ids = [] changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id) for msg_id, msg in changed_messages_to_dict.items(): message_ids.append(msg_id) key = to_dict_cache_key_id(msg_id) items_for_remote_cache[key] = (msg,) cache_set_many(items_for_remote_cache) return message_ids # We use transaction.atomic to support select_for_update in the attachment codepath. @transaction.atomic def do_update_embedded_data(user_profile: UserProfile, message: Message, content: Optional[str], rendered_content: Optional[str]) -> None: event: Dict[str, Any] = { 'type': 'update_message', 'sender': user_profile.email, 'message_id': message.id} changed_messages = [message] ums = UserMessage.objects.filter(message=message.id) if content is not None: update_user_message_flags(message, ums) message.content = content message.rendered_content = rendered_content message.rendered_content_version = markdown_version event["content"] = content event["rendered_content"] = rendered_content message.save(update_fields=["content", "rendered_content"]) event['message_ids'] = update_to_dict_cache(changed_messages) def user_info(um: UserMessage) -> Dict[str, Any]: return { 'id': um.user_profile_id, 'flags': um.flags_list(), } send_event(user_profile.realm, event, list(map(user_info, ums))) class DeleteMessagesEvent(TypedDict, total=False): type: str message_ids: List[int] message_type: str sender_id: int recipient_id: int topic: str stream_id: int # We use transaction.atomic to support select_for_update in the attachment codepath. @transaction.atomic def do_update_message(user_profile: UserProfile, message: Message, new_stream: Optional[Stream], topic_name: Optional[str], propagate_mode: str, send_notification_to_old_thread: bool, send_notification_to_new_thread: bool, content: Optional[str], rendered_content: Optional[str], prior_mention_user_ids: Set[int], mention_user_ids: Set[int], mention_data: Optional[MentionData]=None) -> int: """ The main function for message editing. 
A message edit event can modify: * the message's content (in which case the caller will have set both content and rendered_content), * the topic, in which case the caller will have set topic_name * or both With topic edits, propagate_mode determines whether other message also have their topics edited. """ timestamp = timezone_now() message.last_edit_time = timestamp event: Dict[str, Any] = { 'type': 'update_message', 'user_id': user_profile.id, 'edit_timestamp': datetime_to_timestamp(timestamp), 'message_id': message.id, } edit_history_event: Dict[str, Any] = { 'user_id': user_profile.id, 'timestamp': event['edit_timestamp'], } changed_messages = [message] stream_being_edited = None if message.is_stream_message(): stream_id = message.recipient.type_id stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm) event['stream_name'] = stream_being_edited.name ums = UserMessage.objects.filter(message=message.id) if content is not None: assert rendered_content is not None # mention_data is required if there's a content edit. assert mention_data is not None # add data from group mentions to mentions_user_ids. for group_id in message.mentions_user_group_ids: members = mention_data.get_group_members(group_id) message.mentions_user_ids.update(members) update_user_message_flags(message, ums) # One could imagine checking realm.allow_edit_history here and # modifying the events based on that setting, but doing so # doesn't really make sense. We need to send the edit event # to clients regardless, and a client already had access to # the original/pre-edit content of the message anyway. That # setting must be enforced on the client side, and making a # change here simply complicates the logic for clients parsing # edit history events. event['orig_content'] = message.content event['orig_rendered_content'] = message.rendered_content edit_history_event["prev_content"] = message.content edit_history_event["prev_rendered_content"] = message.rendered_content edit_history_event["prev_rendered_content_version"] = message.rendered_content_version message.content = content message.rendered_content = rendered_content message.rendered_content_version = markdown_version event["content"] = content event["rendered_content"] = rendered_content event['prev_rendered_content_version'] = message.rendered_content_version event['is_me_message'] = Message.is_status_message(content, rendered_content) # message.has_image and message.has_link will have been # already updated by markdown rendering in the caller. 
message.has_attachment = check_attachment_reference_change(message) if message.is_stream_message(): if topic_name is not None: new_topic_name = topic_name else: new_topic_name = message.topic_name() stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget( stream_id=stream_id, topic_name=new_topic_name, ) else: stream_topic = None info = get_recipient_info( recipient=message.recipient, sender_id=message.sender_id, stream_topic=stream_topic, possible_wildcard_mention=mention_data.message_has_wildcards(), ) event['push_notify_user_ids'] = list(info['push_notify_user_ids']) event['stream_push_user_ids'] = list(info['stream_push_user_ids']) event['stream_email_user_ids'] = list(info['stream_email_user_ids']) event['prior_mention_user_ids'] = list(prior_mention_user_ids) event['mention_user_ids'] = list(mention_user_ids) event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids']) if message.mentions_wildcard: event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids']) else: event['wildcard_mention_user_ids'] = [] do_update_mobile_push_notification(message, prior_mention_user_ids, info['stream_push_user_ids']) if topic_name is not None or new_stream is not None: orig_topic_name = message.topic_name() event["propagate_mode"] = propagate_mode event["stream_id"] = message.recipient.type_id if new_stream is not None: assert content is None assert message.is_stream_message() assert stream_being_edited is not None edit_history_event['prev_stream'] = stream_being_edited.id event[ORIG_TOPIC] = orig_topic_name message.recipient_id = new_stream.recipient_id event["new_stream_id"] = new_stream.id event["propagate_mode"] = propagate_mode # When messages are moved from one stream to another, some # users may lose access to those messages, including guest # users and users not subscribed to the new stream (if it is a # private stream). For those users, their experience is as # though the messages were deleted, and we should send a # delete_message event to them instead. subscribers = get_active_subscriptions_for_stream_id( stream_id).select_related("user_profile") subs_to_new_stream = list(get_active_subscriptions_for_stream_id( new_stream.id).select_related("user_profile")) new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream] # Get users who aren't subscribed to the new_stream. subs_losing_usermessages = [ sub for sub in subscribers if sub.user_profile_id not in new_stream_sub_ids ] # Users who can no longer access the message without some action # from administrators. # # TODO: Extend this list to also contain users losing access # due to the messages moving to a private stream they are not # subscribed to. subs_losing_access = [ sub for sub in subs_losing_usermessages if sub.user_profile.is_guest ] ums = ums.exclude(user_profile_id__in=[ sub.user_profile_id for sub in subs_losing_usermessages]) if topic_name is not None: topic_name = truncate_topic(topic_name) message.set_topic_name(topic_name) # These fields have legacy field names. 
event[ORIG_TOPIC] = orig_topic_name event[TOPIC_NAME] = topic_name event[TOPIC_LINKS] = topic_links(message.sender.realm_id, topic_name) edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name delete_event_notify_user_ids: List[int] = [] if propagate_mode in ["change_later", "change_all"]: assert topic_name is not None or new_stream is not None messages_list = update_messages_for_topic_edit( message=message, propagate_mode=propagate_mode, orig_topic_name=orig_topic_name, topic_name=topic_name, new_stream=new_stream, ) changed_messages += messages_list if new_stream is not None: assert stream_being_edited is not None message_ids = [msg.id for msg in changed_messages] # Delete UserMessage objects for users who will no # longer have access to these messages. Note: This could be # very expensive, since it's N guest users x M messages. UserMessage.objects.filter( user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages], message_id__in=message_ids, ).delete() delete_event: DeleteMessagesEvent = { 'type': 'delete_message', 'message_ids': message_ids, 'message_type': 'stream', 'stream_id': stream_being_edited.id, 'topic': orig_topic_name, } delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access] send_event(user_profile.realm, delete_event, delete_event_notify_user_ids) if message.edit_history is not None: edit_history = ujson.loads(message.edit_history) edit_history.insert(0, edit_history_event) else: edit_history = [edit_history_event] message.edit_history = ujson.dumps(edit_history) # This does message.save(update_fields=[...]) save_message_for_edit_use_case(message=message) realm_id: Optional[int] = None if stream_being_edited is not None: realm_id = stream_being_edited.realm_id event['message_ids'] = update_to_dict_cache(changed_messages, realm_id) def user_info(um: UserMessage) -> Dict[str, Any]: return { 'id': um.user_profile_id, 'flags': um.flags_list(), } # The following blocks arranges that users who are subscribed to a # stream and can see history from before they subscribed get # live-update when old messages are edited (e.g. if the user does # a topic edit themself). # # We still don't send an update event to users who are not # subscribed to this stream and don't have a UserMessage row. This # means if a non-subscriber is viewing the narrow, they won't get # a real-time updates. This is a balance between sending # message-edit notifications for every public stream to every user # in the organization (too expansive, and also not what we do for # newly sent messages anyway) and having magical live-updates # where possible. users_to_be_notified = list(map(user_info, ums)) if stream_being_edited is not None: if stream_being_edited.is_history_public_to_subscribers: subscribers = get_active_subscriptions_for_stream_id(stream_id) # We exclude long-term idle users, since they by # definition have no active clients. subscribers = subscribers.exclude(user_profile__long_term_idle=True) # Remove duplicates by excluding the id of users already # in users_to_be_notified list. 
This is the case where a # user both has a UserMessage row and is a current # Subscriber subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums]) if new_stream is not None: assert delete_event_notify_user_ids is not None subscribers = subscribers.exclude(user_profile_id__in=delete_event_notify_user_ids) # All users that are subscribed to the stream must be # notified when a message is edited subscriber_ids = [user.user_profile_id for user in subscribers] if new_stream is not None: # TODO: Guest users don't see the new moved topic # unless breadcrumb message for new stream is # enabled. Excluding these users from receiving this # event helps us avoid an error traceback for our # clients. We should figure out a way to inform the # guest users of this new topic if sending a 'message' # event for these messages is not an option. # # Don't send this event to guest subs who are not # subscribers of the old stream but are subscribed to # the new stream; clients will be confused. old_stream_unsubbed_guests = [ sub for sub in subs_to_new_stream if sub.user_profile.is_guest and sub.user_profile_id not in subscriber_ids ] subscribers = subscribers.exclude(user_profile_id__in=[ sub.user_profile_id for sub in old_stream_unsubbed_guests]) subscriber_ids = [user.user_profile_id for user in subscribers] users_to_be_notified += list(map(subscriber_info, subscriber_ids)) send_event(user_profile.realm, event, users_to_be_notified) if (len(changed_messages) > 0 and new_stream is not None and stream_being_edited is not None): # Notify users that the topic was moved. notify_topic_moved_streams(user_profile, stream_being_edited, orig_topic_name, new_stream, topic_name, send_notification_to_old_thread, send_notification_to_new_thread) return len(changed_messages) def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None: # Messages in a delete_message event either belong to the same topic # or are a single private message, as any other behaviour is not possible with # the current callers to this method. messages = list(messages) message_ids = [message.id for message in messages] if not message_ids: return event: DeleteMessagesEvent = { 'type': 'delete_message', 'message_ids': message_ids, } sample_message = messages[0] message_type = "stream" users_to_notify = [] if not sample_message.is_stream_message(): assert len(messages) == 1 message_type = "private" ums = UserMessage.objects.filter(message_id__in=message_ids) users_to_notify = [um.user_profile_id for um in ums] # TODO: We should plan to remove `sender_id` here. event['recipient_id'] = sample_message.recipient_id event['sender_id'] = sample_message.sender_id archiving_chunk_size = retention.MESSAGE_BATCH_SIZE if message_type == "stream": stream_id = sample_message.recipient.type_id event['stream_id'] = stream_id event['topic'] = sample_message.topic_name() subscribers = get_active_subscriptions_for_stream_id(stream_id) # We exclude long-term idle users, since they by definition have no active clients. 
subscribers = subscribers.exclude(user_profile__long_term_idle=True) subscriber_ids = [user.user_profile_id for user in subscribers] users_to_notify = list(map(subscriber_info, subscriber_ids)) archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size) event['message_type'] = message_type send_event(realm, event, users_to_notify) def do_delete_messages_by_sender(user: UserProfile) -> None: message_ids = list(Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id')) if message_ids: move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE) def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]: stat = COUNT_STATS['messages_in_stream:is_bot:day'] traffic_from = timezone_now() - datetime.timedelta(days=28) query = StreamCount.objects.filter(property=stat.property, end_time__gt=traffic_from) query = query.filter(stream_id__in=stream_ids) traffic_list = query.values('stream_id').annotate(value=Sum('value')) traffic_dict = {} for traffic in traffic_list: traffic_dict[traffic["stream_id"]] = traffic["value"] return traffic_dict def round_to_2_significant_digits(number: int) -> int: return int(round(number, 2 - len(str(number)))) STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7 def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime, recent_traffic: Dict[int, int]) -> Optional[int]: try: stream_traffic = recent_traffic[stream_id] except KeyError: stream_traffic = 0 stream_age = (timezone_now() - stream_date_created).days if stream_age >= 28: average_weekly_traffic = int(stream_traffic // 4) elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS: average_weekly_traffic = int(stream_traffic * 7 // stream_age) else: return None if average_weekly_traffic == 0 and stream_traffic > 0: average_weekly_traffic = 1 return round_to_2_significant_digits(average_weekly_traffic) SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]] def get_web_public_subs(realm: Realm) -> SubHelperT: color_idx = 0 def get_next_color() -> str: nonlocal color_idx color = STREAM_ASSIGNMENT_COLORS[color_idx] color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS) return color subscribed = [] for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False): stream_dict = stream.to_dict() # Add versions of the Subscription fields based on a simulated # new user subscription set. stream_dict['is_muted'] = False stream_dict['color'] = get_next_color() stream_dict['desktop_notifications'] = True stream_dict['audible_notifications'] = True stream_dict['push_notifications'] = True stream_dict['email_notifications'] = True stream_dict['pin_to_top'] = False stream_weekly_traffic = get_average_weekly_stream_traffic(stream.id, stream.date_created, {}) stream_dict['stream_weekly_traffic'] = stream_weekly_traffic stream_dict['email_address'] = '' subscribed.append(stream_dict) return (subscribed, [], []) # In general, it's better to avoid using .values() because it makes # the code pretty ugly, but in this case, it has significant # performance impact for loading / for users with large numbers of # subscriptions, so it's worth optimizing. 
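# A minimal usage sketch for the function below (field values are illustrative):
#   subscribed, unsubscribed, never_subscribed = gather_subscriptions_helper(
#       user_profile, include_subscribers=False)
#   subscribed[0]  # e.g. {'stream_id': 7, 'name': 'denmark', 'color': ..., 'is_muted': False, ...}
# Each of the three lists holds stream dicts sorted by name.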
def gather_subscriptions_helper(user_profile: UserProfile, include_subscribers: bool=True) -> SubHelperT: sub_dicts = get_stream_subscriptions_for_user(user_profile).values( *Subscription.API_FIELDS, "recipient_id").order_by("recipient_id") sub_dicts = list(sub_dicts) sub_recipient_ids = [ sub['recipient_id'] for sub in sub_dicts ] stream_recipient = StreamRecipientMap() stream_recipient.populate_for_recipient_ids(sub_recipient_ids) stream_ids: Set[int] = set() for sub in sub_dicts: sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id']) stream_ids.add(sub['stream_id']) recent_traffic = get_streams_traffic(stream_ids=stream_ids) all_streams = get_active_streams(user_profile.realm).select_related( "realm").values( *Stream.API_FIELDS, # date_created is used as an input for the stream_weekly_traffic computed field. "date_created", # The realm_id and recipient_id are generally not needed in the API. "realm_id", "recipient_id", # email_token isn't public to some users with access to # the stream, so doesn't belong in API_FIELDS. "email_token") stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids] stream_hash = {} for stream in stream_dicts: stream_hash[stream["id"]] = stream all_streams_id = [stream["id"] for stream in all_streams] subscribed = [] unsubscribed = [] never_subscribed = [] # Deactivated streams aren't in stream_hash. streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts if sub["stream_id"] in stream_hash] streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts} # Add never subscribed streams to streams_subscribed_map streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams}) if include_subscribers: subscriber_map: Mapping[int, Optional[List[int]]] = bulk_get_subscriber_user_ids( all_streams, user_profile, streams_subscribed_map, stream_recipient, ) else: # If we're not including subscribers, always return None, # which the below code needs to check for anyway. subscriber_map = defaultdict(lambda: None) sub_unsub_stream_ids = set() for sub in sub_dicts: sub_unsub_stream_ids.add(sub["stream_id"]) stream = stream_hash.get(sub["stream_id"]) if not stream: # This stream has been deactivated, don't include it. continue # We first construct a dictionary based on the standard Stream # and Subscription models' API_FIELDS. stream_dict = {} for field_name in Stream.API_FIELDS: if field_name == "id": stream_dict['stream_id'] = stream["id"] continue stream_dict[field_name] = stream[field_name] # Copy Subscription.API_FIELDS except for "active", which is # used to determine where to the put the field. for field_name in Subscription.API_FIELDS: stream_dict[field_name] = sub[field_name] # Backwards-compatibility for clients that haven't been # updated for the in_home_view => is_muted API migration. stream_dict['in_home_view'] = not stream_dict['is_muted'] # Backwards-compatibility for clients that haven't been # updated for the is_announcement_only -> stream_post_policy # migration. stream_dict['is_announcement_only'] = \ stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS # Add a few computed fields not directly from the data models. 
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream["id"], stream["date_created"], recent_traffic) stream_dict['email_address'] = encode_email_address_helper( stream["name"], stream["email_token"], show_sender=True) # Construct and add subscribers data subscribers: Optional[List[int]] = subscriber_map[stream["id"]] # Important: don't show the subscribers if the stream is invite only # and this user isn't on it anymore (or a realm administrator). if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin): subscribers = None # Guest users lose access to subscribers when they are unsubscribed. if not sub["active"] and user_profile.is_guest: subscribers = None if subscribers is not None: stream_dict['subscribers'] = subscribers # is_active is represented in this structure by which list we include it in. is_active = stream_dict.pop("active") if is_active: subscribed.append(stream_dict) else: unsubscribed.append(stream_dict) all_streams_id_set = set(all_streams_id) if user_profile.can_access_public_streams(): never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids else: never_subscribed_stream_ids = set() never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams if ns_stream_dict['id'] in never_subscribed_stream_ids] for stream in never_subscribed_streams: is_public = (not stream['invite_only']) if is_public or user_profile.is_realm_admin: stream_dict = {} for field_name in Stream.API_FIELDS: if field_name == "id": stream_dict['stream_id'] = stream["id"] continue stream_dict[field_name] = stream[field_name] stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream["id"], stream["date_created"], recent_traffic) # Backwards-compatibility addition of removed field. stream_dict['is_announcement_only'] = \ stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS if is_public or user_profile.is_realm_admin: subscribers = subscriber_map[stream["id"]] if subscribers is not None: stream_dict['subscribers'] = subscribers never_subscribed.append(stream_dict) return (sorted(subscribed, key=lambda x: x['name']), sorted(unsubscribed, key=lambda x: x['name']), sorted(never_subscribed, key=lambda x: x['name'])) def gather_subscriptions( user_profile: UserProfile, include_subscribers: bool=False, ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]: subscribed, unsubscribed, _ = gather_subscriptions_helper( user_profile, include_subscribers=include_subscribers) if include_subscribers: user_ids = set() for subs in [subscribed, unsubscribed]: for sub in subs: if 'subscribers' in sub: for subscriber in sub['subscribers']: user_ids.add(subscriber) email_dict = get_emails_from_user_ids(list(user_ids)) for subs in [subscribed, unsubscribed]: for sub in subs: if 'subscribers' in sub: sub['subscribers'] = sorted([ email_dict[user_id] for user_id in sub['subscribers'] ]) return (subscribed, unsubscribed) def get_active_presence_idle_user_ids(realm: Realm, sender_id: int, message_type: str, active_user_ids: Set[int], user_flags: Dict[int, List[str]]) -> List[int]: ''' Given a list of active_user_ids, we build up a subset of those users who fit these criteria: * They are likely to need notifications (either due to mentions, alert words, or being PM'ed). * They are no longer "present" according to the UserPresence table. 
''' if realm.presence_disabled: return [] is_pm = message_type == 'private' user_ids = set() for user_id in active_user_ids: flags: Iterable[str] = user_flags.get(user_id, []) mentioned = 'mentioned' in flags or 'wildcard_mentioned' in flags private_message = is_pm and user_id != sender_id alerted = 'has_alert_word' in flags if mentioned or private_message or alerted: user_ids.add(user_id) return filter_presence_idle_user_ids(user_ids) def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]: # Given a set of user IDs (the recipients of a message), accesses # the UserPresence table to determine which of these users are # currently idle and should potentially get email notifications # (and push notifications with with # user_profile.enable_online_push_notifications=False). # # We exclude any presence data from ZulipMobile for the purpose of # triggering these notifications; the mobile app can more # effectively do its own client-side filtering of notification # sounds/etc. for the case that the user is actively doing a PM # conversation in the app. if not user_ids: return [] # Matches presence.js constant OFFLINE_THRESHOLD_SECS = 140 recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS) rows = UserPresence.objects.filter( user_profile_id__in=user_ids, status=UserPresence.ACTIVE, timestamp__gte=recent, ).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id') active_user_ids = {row['user_profile_id'] for row in rows} idle_user_ids = user_ids - active_user_ids return sorted(list(idle_user_ids)) def do_send_confirmation_email(invitee: PreregistrationUser, referrer: UserProfile) -> str: """ Send the confirmation/welcome e-mail to an invited user. """ activation_url = create_confirmation_link(invitee, Confirmation.INVITATION) context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.delivery_email, 'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name} from_name = f"{referrer.full_name} (via Zulip)" send_email('zerver/emails/invitation', to_emails=[invitee.email], from_name=from_name, from_address=FromAddress.tokenized_no_reply_address(), language=referrer.realm.default_language, context=context, realm=referrer.realm) return activation_url def email_not_system_bot(email: str) -> None: if is_cross_realm_bot_email(email): msg = email_reserved_for_system_bots_error(email) code = msg raise ValidationError( msg, code=code, params=dict(deactivated=False), ) class InvitationError(JsonableError): code = ErrorCode.INVITATION_FAILED data_fields = ['errors', 'sent_invitations'] def __init__(self, msg: str, errors: List[Tuple[str, str, bool]], sent_invitations: bool) -> None: self._msg: str = msg self.errors: List[Tuple[str, str, bool]] = errors self.sent_invitations: bool = sent_invitations def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int: '''An upper bound on the number of invites sent in the last `days` days''' recent_invites = RealmCount.objects.filter( realm__in=realms, property='invites_sent::day', end_time__gte=timezone_now() - datetime.timedelta(days=days), ).aggregate(Sum('value'))['value__sum'] if recent_invites is None: return 0 return recent_invites def check_invite_limit(realm: Realm, num_invitees: int) -> None: '''Discourage using invitation emails as a vector for carrying spam.''' msg = _("You do not have enough remaining invites. " "Please contact {email} to have your limit raised. 
" "No invitations were sent.").format(email=settings.ZULIP_ADMINISTRATOR) if not settings.OPEN_REALM_CREATION: return recent_invites = estimate_recent_invites([realm], days=1) if num_invitees + recent_invites > realm.max_invites: raise InvitationError(msg, [], sent_invitations=False) default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS) if realm.date_created <= timezone_now() - newrealm_age: # If this isn't a "newly-created" realm, we're done. The # remaining code applies an aggregate limit across all # "new" realms, to address sudden bursts of spam realms. return if realm.max_invites > default_max: # If a user is on a realm where we've bumped up # max_invites, then we exempt them from invite limits. return new_realms = Realm.objects.filter( date_created__gte=timezone_now() - newrealm_age, _max_invites__lte=default_max, ).all() for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS: recent_invites = estimate_recent_invites(new_realms, days=days) if num_invitees + recent_invites > count: raise InvitationError(msg, [], sent_invitations=False) def do_invite_users(user_profile: UserProfile, invitee_emails: SizedTextIterable, streams: Iterable[Stream], invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> None: check_invite_limit(user_profile.realm, len(invitee_emails)) realm = user_profile.realm if not realm.invite_required: # Inhibit joining an open realm to send spam invitations. min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS) if (user_profile.date_joined > timezone_now() - min_age and not user_profile.is_realm_admin): raise InvitationError( _("Your account is too new to send invites for this organization. " "Ask an organization admin, or a more experienced user."), [], sent_invitations=False) good_emails: Set[str] = set() errors: List[Tuple[str, str, bool]] = [] validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm) for email in invitee_emails: if email == '': continue email_error = validate_email_is_valid( email, validate_email_allowed_in_realm, ) if email_error: errors.append((email, email_error, False)) else: good_emails.add(email) ''' good_emails are emails that look ok so far, but we still need to make sure they're not gonna conflict with existing users ''' error_dict = get_existing_user_errors(user_profile.realm, good_emails) skipped: List[Tuple[str, str, bool]] = [] for email in error_dict: msg, deactivated = error_dict[email] skipped.append((email, msg, deactivated)) good_emails.remove(email) validated_emails = list(good_emails) if errors: raise InvitationError( _("Some emails did not validate, so we didn't send any invitations."), errors + skipped, sent_invitations=False) if skipped and len(skipped) == len(invitee_emails): # All e-mails were skipped, so we didn't actually invite anyone. raise InvitationError(_("We weren't able to invite anyone."), skipped, sent_invitations=False) # We do this here rather than in the invite queue processor since this # is used for rate limiting invitations, rather than keeping track of # when exactly invitations were sent do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'], None, timezone_now(), increment=len(validated_emails)) # Now that we are past all the possible errors, we actually create # the PreregistrationUser objects and trigger the email invitations. for email in validated_emails: # The logged in user is the referrer. 
prereg_user = PreregistrationUser(email=email, referred_by=user_profile, invited_as=invite_as, realm=user_profile.realm) prereg_user.save() stream_ids = [stream.id for stream in streams] prereg_user.streams.set(stream_ids) event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id} queue_json_publish("invites", event) if skipped: raise InvitationError(_("Some of those addresses are already using Zulip, " "so we didn't send them an invitation. We did send " "invitations to everyone else!"), skipped, sent_invitations=True) notify_invites_changed(user_profile) def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]: if user_profile.is_realm_admin: prereg_users = filter_to_valid_prereg_users( PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm) ) else: prereg_users = filter_to_valid_prereg_users( PreregistrationUser.objects.filter(referred_by=user_profile) ) invites = [] for invitee in prereg_users: invites.append(dict(email=invitee.email, invited_by_user_id=invitee.referred_by.id, invited=datetime_to_timestamp(invitee.invited_at), id=invitee.id, invited_as=invitee.invited_as, is_multiuse=False)) if not user_profile.is_realm_admin: # We do not return multiuse invites to non-admin users. return invites lowest_datetime = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS) multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE, date_sent__gte=lowest_datetime) for confirmation_obj in multiuse_confirmation_objs: invite = confirmation_obj.content_object invites.append(dict(invited_by_user_id=invite.referred_by.id, invited=datetime_to_timestamp(confirmation_obj.date_sent), id=invite.id, link_url=confirmation_url(confirmation_obj.confirmation_key, user_profile.realm, Confirmation.MULTIUSE_INVITE), invited_as=invite.invited_as, is_multiuse=True)) return invites def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int, streams: Sequence[Stream] = []) -> str: realm = referred_by.realm invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by) if streams: invite.streams.set(streams) invite.invited_as = invited_as invite.save() notify_invites_changed(referred_by) return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE) def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None: email = prereg_user.email # Delete both the confirmation objects and the prereg_user object. # TODO: Probably we actually want to set the confirmation objects # to a "revoked" status so that we can give the invited user a better # error message. content_type = ContentType.objects.get_for_model(PreregistrationUser) Confirmation.objects.filter(content_type=content_type, object_id=prereg_user.id).delete() prereg_user.delete() clear_scheduled_invitation_emails(email) notify_invites_changed(prereg_user) def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None: content_type = ContentType.objects.get_for_model(MultiuseInvite) Confirmation.objects.filter(content_type=content_type, object_id=multiuse_invite.id).delete() multiuse_invite.delete() notify_invites_changed(multiuse_invite.referred_by) def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int: # These are two structurally for the caller's code path. 
assert prereg_user.referred_by is not None assert prereg_user.realm is not None check_invite_limit(prereg_user.referred_by.realm, 1) prereg_user.invited_at = timezone_now() prereg_user.save() do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'], None, prereg_user.invited_at) clear_scheduled_invitation_emails(prereg_user.email) # We don't store the custom email body, so just set it to None event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None} queue_json_publish("invites", event) return datetime_to_timestamp(prereg_user.invited_at) def notify_realm_emoji(realm: Realm) -> None: event = dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji()) send_event(realm, event, active_user_ids(realm.id)) def check_add_realm_emoji(realm: Realm, name: str, author: UserProfile, image_file: File) -> Optional[RealmEmoji]: realm_emoji = RealmEmoji(realm=realm, name=name, author=author) realm_emoji.full_clean() realm_emoji.save() emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id) # The only user-controlled portion of 'emoji_file_name' is an extension, # which can not contain '..' or '/' or '\', making it difficult to exploit emoji_file_name = mark_sanitized(emoji_file_name) emoji_uploaded_successfully = False try: upload_emoji_image(image_file, emoji_file_name, author) emoji_uploaded_successfully = True finally: if not emoji_uploaded_successfully: realm_emoji.delete() return None else: realm_emoji.file_name = emoji_file_name realm_emoji.save(update_fields=['file_name']) notify_realm_emoji(realm_emoji.realm) return realm_emoji def do_remove_realm_emoji(realm: Realm, name: str) -> None: emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False) emoji.deactivated = True emoji.save(update_fields=['deactivated']) notify_realm_emoji(realm) def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None: event = dict(type="alert_words", alert_words=words) send_event(user_profile.realm, event, [user_profile.id]) def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None: words = add_user_alert_words(user_profile, alert_words) notify_alert_words(user_profile, words) def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None: words = remove_user_alert_words(user_profile, alert_words) notify_alert_words(user_profile, words) def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str, date_muted: Optional[datetime.datetime]=None) -> None: if date_muted is None: date_muted = timezone_now() add_topic_mute(user_profile, stream.id, recipient.id, topic, date_muted) event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None: remove_topic_mute(user_profile, stream.id, topic) event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None: UserHotspot.objects.get_or_create(user=user, hotspot=hotspot) event = dict(type="hotspots", hotspots=get_next_hotspots(user)) send_event(user.realm, event, [user.id]) def notify_realm_filters(realm: Realm) -> None: realm_filters = realm_filters_for_realm(realm.id) event = dict(type="realm_filters", realm_filters=realm_filters) send_event(realm, event, active_user_ids(realm.id)) # 
NOTE: Regexes must be simple enough that they can be easily translated to JavaScript # RegExp syntax. In addition to JS-compatible syntax, the following features are available: # * Named groups will be converted to numbered groups automatically # * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int: pattern = pattern.strip() url_format_string = url_format_string.strip() realm_filter = RealmFilter( realm=realm, pattern=pattern, url_format_string=url_format_string) realm_filter.full_clean() realm_filter.save() notify_realm_filters(realm) return realm_filter.id def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None, id: Optional[int]=None) -> None: if pattern is not None: RealmFilter.objects.get(realm=realm, pattern=pattern).delete() else: RealmFilter.objects.get(realm=realm, pk=id).delete() notify_realm_filters(realm) def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]: # We may eventually use memcached to speed this up, but the DB is fast. return UserProfile.emails_from_ids(user_ids) def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> (RealmDomain): realm_domain = RealmDomain.objects.create(realm=realm, domain=domain, allow_subdomains=allow_subdomains) event = dict(type="realm_domains", op="add", realm_domain=dict(domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains)) send_event(realm, event, active_user_ids(realm.id)) return realm_domain def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None: realm_domain.allow_subdomains = allow_subdomains realm_domain.save(update_fields=['allow_subdomains']) event = dict(type="realm_domains", op="change", realm_domain=dict(domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains)) send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id)) def do_remove_realm_domain(realm_domain: RealmDomain, acting_user: Optional[UserProfile]=None) -> None: realm = realm_domain.realm domain = realm_domain.domain realm_domain.delete() if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains: # If this was the last realm domain, we mark the realm as no # longer restricted to domain, because the feature doesn't do # anything if there are no domains, and this is probably less # confusing than the alternative. 
do_set_realm_property(realm, 'emails_restricted_to_domains', False, acting_user=acting_user) event = dict(type="realm_domains", op="remove", domain=domain) send_event(realm, event, active_user_ids(realm.id)) def get_occupied_streams(realm: Realm) -> QuerySet: # TODO: Make a generic stub for QuerySet """ Get streams with subscribers """ exists_expression = Exists( Subscription.objects.filter(active=True, user_profile__is_active=True, user_profile__realm=realm, recipient_id=OuterRef('recipient_id')), ) occupied_streams = Stream.objects.filter(realm=realm, deactivated=False) \ .annotate(occupied=exists_expression).filter(occupied=True) return occupied_streams def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]: query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True) streams = Stream.get_client_data(query) return streams def do_get_streams( user_profile: UserProfile, include_public: bool=True, include_subscribed: bool=True, include_all_active: bool=False, include_default: bool=False, include_owner_subscribed: bool=False, ) -> List[Dict[str, Any]]: if include_all_active and not user_profile.is_api_super_user: raise JsonableError(_("User not authorized for this query")) include_public = include_public and user_profile.can_access_public_streams() # Start out with all streams in the realm with subscribers query = get_occupied_streams(user_profile.realm) if include_all_active: streams = Stream.get_client_data(query) else: # We construct a query as the or (|) of the various sources # this user requested streams from. query_filter: Optional[Q] = None def add_filter_option(option: Q) -> None: nonlocal query_filter if query_filter is None: query_filter = option else: query_filter |= option if include_subscribed: subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile) recipient_check = Q(id__in=set(subscribed_stream_ids)) add_filter_option(recipient_check) if include_public: invite_only_check = Q(invite_only=False) add_filter_option(invite_only_check) if include_owner_subscribed and user_profile.is_bot: bot_owner = user_profile.bot_owner assert bot_owner is not None owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner) owner_subscribed_check = Q(id__in=set(owner_stream_ids)) add_filter_option(owner_subscribed_check) if query_filter is not None: query = query.filter(query_filter) streams = Stream.get_client_data(query) else: # Don't bother going to the database with no valid sources streams = [] streams.sort(key=lambda elt: elt["name"]) if include_default: is_default = {} default_streams = get_default_streams_for_realm(user_profile.realm_id) for default_stream in default_streams: is_default[default_stream.id] = True for stream in streams: stream['is_default'] = is_default.get(stream["stream_id"], False) return streams def notify_attachment_update(user_profile: UserProfile, op: str, attachment_dict: Dict[str, Any]) -> None: event = { 'type': 'attachment', 'op': op, 'attachment': attachment_dict, "upload_space_used": user_profile.realm.currently_used_upload_space_bytes(), } send_event(user_profile.realm, event, [user_profile.id]) def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool: claimed = False for path_id in potential_path_ids: user_profile = message.sender is_message_realm_public = False if message.is_stream_message(): is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public() if not validate_attachment_request(user_profile, path_id): # Technically, there are 2 cases here: # * 
The user put something in their message that has the form # of an upload, but doesn't correspond to a file that doesn't # exist. validate_attachment_request will return None. # * The user is trying to send a link to a file they don't have permission to # access themselves. validate_attachment_request will return False. # # Either case is unusual and suggests a UI bug that got # the user in this situation, so we log in these cases. logging.warning( "User %s tried to share upload %s in message %s, but lacks permission", user_profile.id, path_id, message.id, ) continue claimed = True attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public) notify_attachment_update(user_profile, "update", attachment.to_dict()) return claimed def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None: old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago) for attachment in old_unclaimed_attachments: delete_message_image(attachment.path_id) attachment.delete() def check_attachment_reference_change(message: Message) -> bool: # For a unsaved message edit (message.* has been updated, but not # saved to the database), adjusts Attachment data to correspond to # the new content. prev_attachments = {a.path_id for a in message.attachment_set.all()} new_attachments = set(message.potential_attachment_path_ids) if new_attachments == prev_attachments: return bool(prev_attachments) to_remove = list(prev_attachments - new_attachments) if len(to_remove) > 0: attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update() message.attachment_set.remove(*attachments_to_update) to_add = list(new_attachments - prev_attachments) if len(to_add) > 0: do_claim_attachments(message, to_add) return message.attachment_set.exists() def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None: fields = custom_profile_fields_for_realm(realm.id) event = dict(type="custom_profile_fields", op=operation, fields=[f.as_dict() for f in fields]) send_event(realm, event, active_user_ids(realm.id)) def try_add_realm_default_custom_profile_field(realm: Realm, field_subtype: str) -> CustomProfileField: field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype] field = CustomProfileField(realm=realm, name=field_data['name'], field_type=CustomProfileField.EXTERNAL_ACCOUNT, hint=field_data['hint'], field_data=ujson.dumps(dict(subtype=field_subtype))) field.save() field.order = field.id field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'add') return field def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int, hint: str='', field_data: Optional[ProfileFieldData]=None) -> CustomProfileField: field = CustomProfileField(realm=realm, name=name, field_type=field_type) field.hint = hint if (field.field_type == CustomProfileField.CHOICE or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT): field.field_data = ujson.dumps(field_data or {}) field.save() field.order = field.id field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'add') return field def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None: """ Deleting a field will also delete the user profile data associated with it in CustomProfileFieldValue model. 
""" field.delete() notify_realm_custom_profile_fields(realm, 'delete') def do_remove_realm_custom_profile_fields(realm: Realm) -> None: CustomProfileField.objects.filter(realm=realm).delete() def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField, name: str, hint: str='', field_data: Optional[ProfileFieldData]=None) -> None: field.name = name field.hint = hint if (field.field_type == CustomProfileField.CHOICE or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT): field.field_data = ujson.dumps(field_data or {}) field.save() notify_realm_custom_profile_fields(realm, 'update') def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None: order_mapping = {_[1]: _[0] for _ in enumerate(order)} fields = CustomProfileField.objects.filter(realm=realm) for field in fields: if field.id not in order_mapping: raise JsonableError(_("Invalid order mapping.")) for field in fields: field.order = order_mapping[field.id] field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'update') def notify_user_update_custom_profile_data(user_profile: UserProfile, field: Dict[str, Union[int, str, List[int], None]]) -> None: data = dict(id=field['id']) if field['type'] == CustomProfileField.USER: data["value"] = ujson.dumps(field['value']) else: data['value'] = field['value'] if field['rendered_value']: data['rendered_value'] = field['rendered_value'] payload = dict(user_id=user_profile.id, custom_profile_field=data) event = dict(type="realm_user", op="update", person=payload) send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id)) def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile, data: List[Dict[str, Union[int, str, List[int]]]], ) -> None: with transaction.atomic(): for field in data: field_value, created = CustomProfileFieldValue.objects.get_or_create( user_profile=user_profile, field_id=field['id']) if not created and field_value.value == str(field['value']): # If the field value isn't actually being changed to a different one, # and always_notify is disabled, we have nothing to do here for this field. # Note: field_value.value is a TextField() so we need to cast field['value'] # to a string for the comparison in this if. 
continue field_value.value = field['value'] if field_value.field.is_renderable(): field_value.rendered_value = render_stream_description(str(field['value'])) field_value.save(update_fields=['value', 'rendered_value']) else: field_value.save(update_fields=['value']) notify_user_update_custom_profile_data(user_profile, { "id": field_value.field_id, "value": field_value.value, "rendered_value": field_value.rendered_value, "type": field_value.field.field_type}) def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None: try: field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id) field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile) field_value.delete() notify_user_update_custom_profile_data(user_profile, {'id': field_id, 'value': None, 'rendered_value': None, 'type': field.field_type}) except CustomProfileField.DoesNotExist: raise JsonableError(_('Field id {id} not found.').format(id=field_id)) except CustomProfileFieldValue.DoesNotExist: pass def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None: event = dict(type="user_group", op="add", group=dict(name=user_group.name, members=[member.id for member in members], description=user_group.description, id=user_group.id, ), ) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile], description: str) -> None: try: user_group = create_user_group(name, initial_members, realm, description=description) do_send_create_user_group_event(user_group, initial_members) except django.db.utils.IntegrityError: raise JsonableError(_("User group '{}' already exists.").format(name)) def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None: event = dict(type="user_group", op='update', group_id=user_group.id, data=data) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def do_update_user_group_name(user_group: UserGroup, name: str) -> None: try: user_group.name = name user_group.save(update_fields=['name']) except django.db.utils.IntegrityError: raise JsonableError(_("User group '{}' already exists.").format(name)) do_send_user_group_update_event(user_group, dict(name=name)) def do_update_user_group_description(user_group: UserGroup, description: str) -> None: user_group.description = description user_group.save(update_fields=['description']) do_send_user_group_update_event(user_group, dict(description=description)) def do_update_outgoing_webhook_service(bot_profile: UserProfile, service_interface: int, service_payload_url: str) -> None: # TODO: First service is chosen because currently one bot can only have one service. # Update this once multiple services are supported. 
service = get_bot_services(bot_profile.id)[0] service.base_url = service_payload_url service.interface = service_interface service.save() send_event(bot_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=bot_profile.id, services = [dict(base_url=service.base_url, interface=service.interface, token=service.token)], ), ), bot_owner_user_ids(bot_profile)) def do_update_bot_config_data(bot_profile: UserProfile, config_data: Dict[str, str]) -> None: for key, value in config_data.items(): set_bot_config(bot_profile, key, value) updated_config_data = get_bot_config(bot_profile) send_event(bot_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=bot_profile.id, services = [dict(config_data=updated_config_data)], ), ), bot_owner_user_ids(bot_profile)) def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]: user_profile = get_user_profile_by_id(user_profile_id) services = get_bot_services(user_profile_id) service_dicts: List[Dict[str, Any]] = [] if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: service_dicts = [{'base_url': service.base_url, 'interface': service.interface, 'token': service.token, } for service in services] elif user_profile.bot_type == UserProfile.EMBEDDED_BOT: try: service_dicts = [{'config_data': get_bot_config(user_profile), 'service_name': services[0].name, }] # A ConfigError just means that there are no config entries for user_profile. except ConfigError: pass return service_dicts def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]], realm: Realm) -> Dict[int, List[Dict[str, Any]]]: bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts] bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list) for service in Service.objects.filter(user_profile_id__in=bot_profile_ids): bot_services_by_uid[service.user_profile_id].append(service) embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT] embedded_bot_configs = get_bot_configs(embedded_bot_ids) service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {} for bot_dict in bot_dicts: bot_profile_id = bot_dict["id"] bot_type = bot_dict["bot_type"] services = bot_services_by_uid[bot_profile_id] service_dicts: List[Dict[str, Any]] = [] if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: service_dicts = [{'base_url': service.base_url, 'interface': service.interface, 'token': service.token, } for service in services] elif bot_type == UserProfile.EMBEDDED_BOT: if bot_profile_id in embedded_bot_configs.keys(): bot_config = embedded_bot_configs[bot_profile_id] service_dicts = [{'config_data': bot_config, 'service_name': services[0].name, }] service_dicts_by_uid[bot_profile_id] = service_dicts return service_dicts_by_uid def get_owned_bot_dicts(user_profile: UserProfile, include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]: if user_profile.is_realm_admin and include_all_realm_bots_if_admin: result = get_bot_dicts_in_realm(user_profile.realm) else: result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True, bot_owner=user_profile).values(*bot_dict_fields) services_by_ids = get_service_dicts_for_bots(result, user_profile.realm) return [{'email': botdict['email'], 'user_id': botdict['id'], 'full_name': botdict['full_name'], 'bot_type': botdict['bot_type'], 'is_active': botdict['is_active'], 'api_key': botdict['api_key'], 'default_sending_stream': botdict['default_sending_stream__name'], 'default_events_register_stream': botdict['default_events_register_stream__name'], 
'default_all_public_streams': botdict['default_all_public_streams'], 'owner_id': botdict['bot_owner__id'], 'avatar_url': avatar_url_from_dict(botdict), 'services': services_by_ids[botdict['id']], } for botdict in result] def do_send_user_group_members_update_event(event_name: str, user_group: UserGroup, user_ids: List[int]) -> None: event = dict(type="user_group", op=event_name, group_id=user_group.id, user_ids=user_ids) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def bulk_add_members_to_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None: memberships = [UserGroupMembership(user_group_id=user_group.id, user_profile=user_profile) for user_profile in user_profiles] UserGroupMembership.objects.bulk_create(memberships) user_ids = [up.id for up in user_profiles] do_send_user_group_members_update_event('add_members', user_group, user_ids) def remove_members_from_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None: UserGroupMembership.objects.filter( user_group_id=user_group.id, user_profile__in=user_profiles).delete() user_ids = [up.id for up in user_profiles] do_send_user_group_members_update_event('remove_members', user_group, user_ids) def do_send_delete_user_group_event(realm: Realm, user_group_id: int, realm_id: int) -> None: event = dict(type="user_group", op="remove", group_id=user_group_id) send_event(realm, event, active_user_ids(realm_id)) def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None: user_group = access_user_group_by_id(user_group_id, user_profile) user_group.delete() do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id) def do_send_realm_reactivation_email(realm: Realm) -> None: url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION) context = {'confirmation_url': url, 'realm_uri': realm.uri, 'realm_name': realm.name} language = realm.default_language send_email_to_admins( 'zerver/emails/realm_reactivation', realm, from_address=FromAddress.tokenized_no_reply_address(), from_name=FromAddress.security_email_from_name(language=language), language=language, context=context) def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None: user.zoom_token = token user.save(update_fields=["zoom_token"]) send_event( user.realm, dict(type="has_zoom_token", value=token is not None), [user.id], ) def notify_realm_export(user_profile: UserProfile) -> None: # In the future, we may want to send this event to all realm admins. event = dict(type='realm_export', exports=get_realm_exports_serialized(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None: # Give mypy a hint so it knows `ujson.loads` # isn't being passed an `Optional[str]`. export_extra_data = export.extra_data assert export_extra_data is not None export_data = ujson.loads(export_extra_data) export_path = export_data.get('export_path') if export_path: # Allow removal even if the export failed. 
delete_export_tarball(export_path) export_data.update({'deleted_timestamp': timezone_now().timestamp()}) export.extra_data = ujson.dumps(export_data) export.save(update_fields=['extra_data']) notify_realm_export(user_profile) def get_topic_messages(user_profile: UserProfile, stream: Stream, topic_name: str) -> List[Message]: query = UserMessage.objects.filter( user_profile=user_profile, message__recipient=stream.recipient, ).order_by("id") return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
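The masked excerpt above ends with get_topic_messages, which narrows a user's own UserMessage rows to a single stream topic and returns the corresponding Message objects ordered by UserMessage id. A minimal, hypothetical usage sketch follows; the names `moderator` and `announcements` are assumptions for illustration only and do not come from this dataset row.

# Hypothetical illustration only: `moderator` (a UserProfile) and
# `announcements` (a Stream in the same realm) are assumed to already exist.
history = get_topic_messages(moderator, announcements, "release notes")
for message in history:
    print(message.id, message.content)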
def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
    """
    Deactivate this realm. Do NOT deactivate the users -- we need to
    be able to tell the difference between users that were intentionally
    deactivated, e.g. by a realm admin, and users who can't currently
    use Zulip because their realm has been deactivated.
    """
    if realm.deactivated:
        return

    realm.deactivated = True
    realm.save(update_fields=["deactivated"])

    if settings.BILLING_ENABLED:
        downgrade_now(realm)

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED,
        event_time=event_time, acting_user=acting_user,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
        }))

    ScheduledEmail.objects.filter(realm=realm).delete()

    for user in active_humans_in_realm(realm):
        # Don't deactivate the users, but do delete their sessions so they get
        # bumped to the login screen, where they'll get a realm deactivation
        # notice when they try to log in.
        delete_user_sessions(user)

    event = dict(type="realm", op="deactivated",
                 realm_id=realm.id)
    send_event(realm, event, active_user_ids(realm.id))
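A minimal usage sketch of the reference implementation above, assuming a Django test-style environment in which `realm` and an administrator profile `support_admin` already exist; neither name comes from this dataset row, and the sketch is illustrative rather than part of the source file.

# Hypothetical usage sketch: only names defined in the module above are called.
do_deactivate_realm(realm, acting_user=support_admin)

assert realm.deactivated
# The deactivation is recorded in the audit log, attributed to the acting user.
audit_entry = RealmAuditLog.objects.filter(
    realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED,
).latest("event_time")
assert audit_entry.acting_user == support_admin
# User accounts are deliberately left active; only their sessions were deleted,
# so they see the realm-deactivation notice the next time they try to log in.

Note the design choice the docstring calls out: deactivation is recorded on the Realm and in RealmAuditLog rather than by flipping is_active on every user, which keeps intentional account deactivations distinguishable from a realm-wide shutdown.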
795
827
import datetime import itertools import logging import os import platform import time from collections import defaultdict from operator import itemgetter from typing import ( AbstractSet, Any, Callable, Dict, Iterable, List, Mapping, MutableMapping, Optional, Sequence, Set, Tuple, Union, ) import django.db.utils import ujson from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ValidationError from django.core.files import File from django.db import IntegrityError, connection, transaction from django.db.models import Count, Exists, F, Max, OuterRef, Q, Sum from django.db.models.query import QuerySet from django.utils.html import escape from django.utils.timezone import now as timezone_now from django.utils.translation import override as override_language from django.utils.translation import ugettext as _ from psycopg2.extras import execute_values from psycopg2.sql import SQL from typing_extensions import TypedDict from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat from analytics.models import StreamCount from confirmation import settings as confirmation_settings from confirmation.models import ( Confirmation, confirmation_url, create_confirmation_link, generate_key, ) from zerver.decorator import statsd_increment from zerver.lib import retention as retention from zerver.lib.addressee import Addressee from zerver.lib.alert_words import ( add_user_alert_words, get_alert_word_automaton, remove_user_alert_words, ) from zerver.lib.avatar import avatar_url, avatar_url_from_dict from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config from zerver.lib.bulk_create import bulk_create_users from zerver.lib.cache import ( bot_dict_fields, cache_delete, cache_delete_many, cache_set, cache_set_many, cache_with_key, delete_user_profile_caches, display_recipient_cache_key, flush_user_profile, to_dict_cache_key_id, user_profile_by_api_key_cache_key, user_profile_by_email_cache_key, ) from zerver.lib.context_managers import lockfile from zerver.lib.create_user import create_user, get_display_email_address from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper from zerver.lib.email_notifications import enqueue_welcome_emails from zerver.lib.email_validation import ( email_reserved_for_system_bots_error, get_existing_user_errors, get_realm_email_validator, validate_email_is_valid, ) from zerver.lib.emoji import get_emoji_file_name from zerver.lib.exceptions import ( ErrorCode, JsonableError, MarkdownRenderingException, StreamDoesNotExistError, StreamWithIDDoesNotExistError, ) from zerver.lib.export import get_realm_exports_serialized from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS from zerver.lib.hotspots import get_next_hotspots from zerver.lib.i18n import get_language_name from zerver.lib.markdown import MentionData, topic_links from zerver.lib.markdown import version as markdown_version from zerver.lib.message import ( MessageDict, access_message, render_markdown, truncate_body, truncate_topic, update_first_visible_message_id, ) from zerver.lib.pysa import mark_sanitized from zerver.lib.queue import queue_json_publish from zerver.lib.realm_icon import realm_icon_url from zerver.lib.realm_logo import get_realm_logo_data from zerver.lib.retention import move_messages_to_archive from zerver.lib.send_email import ( FromAddress, clear_scheduled_emails, clear_scheduled_invitation_emails, send_email, 
send_email_to_admins, ) from zerver.lib.server_initialization import create_internal_realm, server_initialized from zerver.lib.sessions import delete_user_sessions from zerver.lib.storage import static_path from zerver.lib.stream_recipient import StreamRecipientMap from zerver.lib.stream_subscription import ( get_active_subscriptions_for_stream_id, get_active_subscriptions_for_stream_ids, get_bulk_stream_subscriber_info, get_stream_subscriptions_for_user, get_stream_subscriptions_for_users, get_subscribed_stream_ids_for_user, num_subscribers_for_stream_id, ) from zerver.lib.stream_topic import StreamTopicTarget from zerver.lib.streams import ( access_stream_for_send_message, check_stream_name, create_stream_if_needed, get_default_value_for_history_public_to_subscribers, render_stream_description, send_stream_creation_event, subscribed_to_stream, ) from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime from zerver.lib.topic import ( LEGACY_PREV_TOPIC, ORIG_TOPIC, TOPIC_LINKS, TOPIC_NAME, filter_by_exact_message_topic, filter_by_topic_name_via_message, save_message_for_edit_use_case, update_messages_for_topic_edit, ) from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute from zerver.lib.types import ProfileFieldData from zerver.lib.upload import ( claim_attachment, delete_avatar_image, delete_export_tarball, delete_message_image, upload_emoji_image, ) from zerver.lib.user_groups import access_user_group_by_id, create_user_group from zerver.lib.user_status import update_user_status from zerver.lib.users import ( check_bot_name_available, check_full_name, format_user_row, get_api_key, user_profile_to_user_row, ) from zerver.lib.utils import generate_api_key, log_statsd_event from zerver.lib.validator import check_widget_content from zerver.lib.widget import do_widget_post_save_actions from zerver.models import ( MAX_MESSAGE_LENGTH, Attachment, Client, CustomProfileField, CustomProfileFieldValue, DefaultStream, DefaultStreamGroup, EmailChangeStatus, Message, MultiuseInvite, PreregistrationUser, Reaction, Realm, RealmAuditLog, RealmDomain, RealmEmoji, RealmFilter, Recipient, ScheduledEmail, ScheduledMessage, Service, Stream, SubMessage, Subscription, UserActivity, UserActivityInterval, UserGroup, UserGroupMembership, UserHotspot, UserMessage, UserPresence, UserProfile, UserStatus, active_non_guest_user_ids, active_user_ids, custom_profile_fields_for_realm, filter_to_valid_prereg_users, get_active_streams, get_bot_dicts_in_realm, get_bot_services, get_client, get_default_stream_groups, get_huddle_recipient, get_huddle_user_ids, get_old_unclaimed_attachments, get_stream, get_stream_by_id_in_realm, get_stream_cache_key, get_system_bot, get_user_by_delivery_email, get_user_by_id_in_realm_including_cross_realm, get_user_profile_by_id, is_cross_realm_bot_email, query_for_ids, realm_filters_for_realm, stream_name_in_use, validate_attachment_request, ) from zerver.tornado.event_queue import send_event if settings.BILLING_ENABLED: from corporate.lib.stripe import downgrade_now, update_license_ledger_if_needed # This will be used to type annotate parameters in a function if the function # works on both str and unicode in python 2 but in python 3 it only works on str. 
SizedTextIterable = Union[Sequence[str], AbstractSet[str]] ONBOARDING_TOTAL_MESSAGES = 1000 ONBOARDING_UNREAD_MESSAGES = 20 STREAM_ASSIGNMENT_COLORS = [ "#76ce90", "#fae589", "#a6c7e5", "#e79ab5", "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5", "#f5ce6e", "#c2726a", "#94c849", "#bd86e5", "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063", "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4", "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"] def subscriber_info(user_id: int) -> Dict[str, Any]: return { 'id': user_id, 'flags': ['read'] } # Store an event in the log for re-importing messages def log_event(event: MutableMapping[str, Any]) -> None: if settings.EVENT_LOG_DIR is None: return if "timestamp" not in event: event["timestamp"] = time.time() if not os.path.exists(settings.EVENT_LOG_DIR): os.mkdir(settings.EVENT_LOG_DIR) template = os.path.join(settings.EVENT_LOG_DIR, '%s.' + platform.node() + timezone_now().strftime('.%Y-%m-%d')) with lockfile(template % ('lock',)): with open(template % ('events',), 'a') as log: log.write(ujson.dumps(event) + '\n') def can_access_stream_user_ids(stream: Stream) -> Set[int]: # return user ids of users who can access the attributes of # a stream, such as its name/description. if stream.is_public(): # For a public stream, this is everyone in the realm # except unsubscribed guest users return public_stream_user_ids(stream) else: # for a private stream, it's subscribers plus realm admins. return private_stream_user_ids( stream.id) | {user.id for user in stream.realm.get_admin_users_and_bots()} def private_stream_user_ids(stream_id: int) -> Set[int]: # TODO: Find similar queries elsewhere and de-duplicate this code. subscriptions = get_active_subscriptions_for_stream_id(stream_id) return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')} def public_stream_user_ids(stream: Stream) -> Set[int]: guest_subscriptions = get_active_subscriptions_for_stream_id( stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST) guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')} return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]: is_private_bot = ( user_profile.default_sending_stream and user_profile.default_sending_stream.invite_only or user_profile.default_events_register_stream and user_profile.default_events_register_stream.invite_only) if is_private_bot: return {user_profile.bot_owner_id} else: users = {user.id for user in user_profile.realm.get_human_admin_users()} users.add(user_profile.bot_owner_id) return users def realm_user_count(realm: Realm) -> int: return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count() def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]: human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0, UserProfile.ROLE_REALM_OWNER: 0, UserProfile.ROLE_MEMBER: 0, UserProfile.ROLE_GUEST: 0} for value_dict in list(UserProfile.objects.filter( realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role'))): human_counts[value_dict['role']] = value_dict['role__count'] bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count() return { RealmAuditLog.ROLE_COUNT_HUMANS: human_counts, RealmAuditLog.ROLE_COUNT_BOTS: bot_count, } def get_signups_stream(realm: Realm) -> Stream: # This one-liner helps us work around a lint rule. 
return get_stream("signups", realm) def notify_new_user(user_profile: UserProfile) -> None: sender_email = settings.NOTIFICATION_BOT sender = get_system_bot(sender_email) user_count = realm_user_count(user_profile.realm) signup_notifications_stream = user_profile.realm.get_signup_notifications_stream() # Send notification to realm signup notifications stream if it exists # Don't send notification for the first user in a realm if signup_notifications_stream is not None and user_count > 1: with override_language(user_profile.realm.default_language): message = _("{user} just signed up for Zulip. (total: {user_count})").format( user=f"@_**{user_profile.full_name}|{user_profile.id}**", user_count=user_count ) internal_send_stream_message( user_profile.realm, sender, signup_notifications_stream, _("signups"), message ) # We also send a notification to the Zulip administrative realm admin_realm = sender.realm try: # Check whether the stream exists signups_stream = get_signups_stream(admin_realm) with override_language(admin_realm.default_language): # We intentionally use the same strings as above to avoid translation burden. message = _("{user} just signed up for Zulip. (total: {user_count})").format( user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count ) internal_send_stream_message( admin_realm, sender, signups_stream, user_profile.realm.display_subdomain, message ) except Stream.DoesNotExist: # If the signups stream hasn't been created in the admin # realm, don't auto-create it to send to it; just do nothing. pass def notify_invites_changed(user_profile: UserProfile) -> None: event = dict(type="invites_changed") admin_ids = [user.id for user in user_profile.realm.get_admin_users_and_bots()] send_event(user_profile.realm, event, admin_ids) def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None: """Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public streams, so you have something to look at in your home view once you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES are marked unread. """ one_week_ago = timezone_now() - datetime.timedelta(weeks=1) recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only] recent_messages = Message.objects.filter(recipient_id__in=recipient_ids, date_sent__gt=one_week_ago).order_by("-id") message_ids_to_use = list(reversed(recent_messages.values_list( 'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES])) if len(message_ids_to_use) == 0: return # Handle the race condition where a message arrives between # bulk_add_subscriptions above and the Message query just above already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use, user_profile=user_profile).values_list("message_id", flat=True)) # Mark the newest ONBOARDING_UNREAD_MESSAGES as unread. 
marked_unread = 0 ums_to_create = [] for message_id in reversed(message_ids_to_use): if message_id in already_ids: continue um = UserMessage(user_profile=user_profile, message_id=message_id) if marked_unread < ONBOARDING_UNREAD_MESSAGES: marked_unread += 1 else: um.flags = UserMessage.flags.read ums_to_create.append(um) UserMessage.objects.bulk_create(reversed(ums_to_create)) # Does the processing for a new user account: # * Subscribes to default/invitation streams # * Fills in some recent historical messages # * Notifies other users in realm and Zulip about the signup # * Deactivates PreregistrationUser objects # * subscribe the user to newsletter if newsletter_data is specified def process_new_human_user(user_profile: UserProfile, prereg_user: Optional[PreregistrationUser]=None, newsletter_data: Optional[Mapping[str, str]]=None, default_stream_groups: Sequence[DefaultStreamGroup]=[], realm_creation: bool=False) -> None: mit_beta_user = user_profile.realm.is_zephyr_mirror_realm if prereg_user is not None: prereg_user.status = confirmation_settings.STATUS_ACTIVE prereg_user.save(update_fields=['status']) streams = prereg_user.streams.all() acting_user: Optional[UserProfile] = prereg_user.referred_by else: streams = [] acting_user = None # If the user's invitation didn't explicitly list some streams, we # add the default streams if len(streams) == 0: streams = get_default_subs(user_profile) for default_stream_group in default_stream_groups: default_stream_group_streams = default_stream_group.streams.all() for stream in default_stream_group_streams: if stream not in streams: streams.append(stream) bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user) add_new_user_history(user_profile, streams) # mit_beta_users don't have a referred_by field if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None: # This is a cross-realm private message. with override_language(prereg_user.referred_by.default_language): internal_send_private_message( user_profile.realm, get_system_bot(settings.NOTIFICATION_BOT), prereg_user.referred_by, _("{user} accepted your invitation to join Zulip!").format(user=f"{user_profile.full_name} <`{user_profile.email}`>") ) # Mark any other PreregistrationUsers that are STATUS_ACTIVE as # inactive so we can keep track of the PreregistrationUser we # actually used for analytics if prereg_user is not None: PreregistrationUser.objects.filter( email__iexact=user_profile.delivery_email).exclude(id=prereg_user.id)\ .update(status=confirmation_settings.STATUS_REVOKED) if prereg_user.referred_by is not None: notify_invites_changed(user_profile) else: PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email)\ .update(status=confirmation_settings.STATUS_REVOKED) notify_new_user(user_profile) # Clear any scheduled invitation emails to prevent them # from being sent after the user is created. clear_scheduled_invitation_emails(user_profile.delivery_email) if user_profile.realm.send_welcome_emails: enqueue_welcome_emails(user_profile, realm_creation) # We have an import loop here; it's intentional, because we want # to keep all the onboarding code in zerver/lib/onboarding.py. 
from zerver.lib.onboarding import send_initial_pms send_initial_pms(user_profile) if newsletter_data is not None: # If the user was created automatically via the API, we may # not want to register them for the newsletter queue_json_publish( "signups", { 'email_address': user_profile.delivery_email, 'user_id': user_profile.id, 'merge_fields': { 'NAME': user_profile.full_name, 'REALM_ID': user_profile.realm_id, 'OPTIN_IP': newsletter_data["IP"], 'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)), }, }, lambda event: None) def notify_created_user(user_profile: UserProfile) -> None: user_row = user_profile_to_user_row(user_profile) person = format_user_row(user_profile.realm, user_profile, user_row, # Since we don't know what the client # supports at this point in the code, we # just assume client_gravatar and # user_avatar_url_field_optional = False :( client_gravatar=False, user_avatar_url_field_optional=False, # We assume there's no custom profile # field data for a new user; initial # values are expected to be added in a # later event. custom_profile_field_data={}) event: Dict[str, Any] = dict(type="realm_user", op="add", person=person) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]: def stream_name(stream: Optional[Stream]) -> Optional[str]: if not stream: return None return stream.name default_sending_stream_name = stream_name(user_profile.default_sending_stream) default_events_register_stream_name = stream_name(user_profile.default_events_register_stream) bot = dict(email=user_profile.email, user_id=user_profile.id, full_name=user_profile.full_name, bot_type=user_profile.bot_type, is_active=user_profile.is_active, api_key=get_api_key(user_profile), default_sending_stream=default_sending_stream_name, default_events_register_stream=default_events_register_stream_name, default_all_public_streams=user_profile.default_all_public_streams, avatar_url=avatar_url(user_profile), services = get_service_dicts_for_bot(user_profile.id), ) # Set the owner key only when the bot has an owner. # The default bots don't have an owner. So don't # set the owner key while reactivating them. 
if user_profile.bot_owner is not None: bot['owner_id'] = user_profile.bot_owner.id return dict(type="realm_bot", op="add", bot=bot) def notify_created_bot(user_profile: UserProfile) -> None: event = created_bot_event(user_profile) send_event(user_profile.realm, event, bot_owner_user_ids(user_profile)) def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None: user_set = set() for full_name, email in name_list: user_set.add((email, full_name, True)) bulk_create_users(realm, user_set, bot_type) def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str, bot_type: Optional[int]=None, role: Optional[int]=None, bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None, timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR, default_sending_stream: Optional[Stream]=None, default_events_register_stream: Optional[Stream]=None, default_all_public_streams: Optional[bool]=None, prereg_user: Optional[PreregistrationUser]=None, newsletter_data: Optional[Dict[str, str]]=None, default_stream_groups: Sequence[DefaultStreamGroup]=[], source_profile: Optional[UserProfile]=None, realm_creation: bool=False, acting_user: Optional[UserProfile]=None) -> UserProfile: user_profile = create_user(email=email, password=password, realm=realm, full_name=full_name, role=role, bot_type=bot_type, bot_owner=bot_owner, tos_version=tos_version, timezone=timezone, avatar_source=avatar_source, default_sending_stream=default_sending_stream, default_events_register_stream=default_events_register_stream, default_all_public_streams=default_all_public_streams, source_profile=source_profile) event_time = user_profile.date_joined if not acting_user: acting_user = user_profile RealmAuditLog.objects.create( realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_CREATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) # Note that for bots, the caller will send an additional event # with bot-specific info like services. 
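    # Hedged sketch of the notification fan-out from here (the function
    # names are from this module; the summary itself is illustrative):
    #   notify_created_user -> 'realm_user'/'add' event to all active users
    #   notify_created_bot  -> 'realm_bot'/'add' event, sent by the caller
    #                          for bots, to bot_owner_user_ids(user_profile)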
    notify_created_user(user_profile)
    if bot_type is None:
        process_new_human_user(user_profile, prereg_user=prereg_user,
                               newsletter_data=newsletter_data,
                               default_stream_groups=default_stream_groups,
                               realm_creation=realm_creation)
    return user_profile

def do_activate_user(user_profile: UserProfile,
                     acting_user: Optional[UserProfile]=None) -> None:
    user_profile.is_active = True
    user_profile.is_mirror_dummy = False
    user_profile.set_unusable_password()
    user_profile.date_joined = timezone_now()
    user_profile.tos_version = settings.TOS_VERSION
    user_profile.save(update_fields=["is_active", "date_joined", "password",
                                     "is_mirror_dummy", "tos_version"])

    event_time = user_profile.date_joined
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)

    notify_created_user(user_profile)

def do_reactivate_user(user_profile: UserProfile,
                       acting_user: Optional[UserProfile]=None) -> None:
    # Unlike do_activate_user, this is meant for re-activating existing users,
    # so it doesn't reset their password, etc.
    user_profile.is_active = True
    user_profile.save(update_fields=["is_active"])

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)

    notify_created_user(user_profile)

    if user_profile.is_bot:
        notify_created_bot(user_profile)

def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
    return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)

def do_set_realm_property(realm: Realm, name: str, value: Any,
                          acting_user: Optional[UserProfile] = None) -> None:
    """Takes in a realm object, the name of an attribute to update, the
    value to update, and the user who initiated the update.
    """
    property_type = Realm.property_types[name]
    assert isinstance(value, property_type), (
        f'Cannot update {name}: {value} is not an instance of {property_type}')

    old_value = getattr(realm, name)
    setattr(realm, name, value)
    realm.save(update_fields=[name])

    event = dict(
        type='realm',
        op='update',
        property=name,
        value=value,
    )
    send_event(realm, event, active_user_ids(realm.id))

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
        event_time=event_time, acting_user=acting_user,
        extra_data=ujson.dumps({
            RealmAuditLog.OLD_VALUE: {'property': name, 'value': old_value},
            RealmAuditLog.NEW_VALUE: {'property': name, 'value': value}
        }))

    if name == "email_address_visibility":
        if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
            # We use real email addresses on UserProfile.email only if
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so
            # changes between the other values do not require updating
            # that field; we can save that work and return here.
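            # Illustrative example (not from the original): a change from
            # EMAIL_ADDRESS_VISIBILITY_ADMINS to
            # EMAIL_ADDRESS_VISIBILITY_MEMBERS leaves every
            # UserProfile.email as a dummy address either way, so we
            # return here; only transitions into or out of
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE fall through to the
            # update below.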
return user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False) for user_profile in user_profiles: user_profile.email = get_display_email_address(user_profile, realm) # TODO: Design a bulk event for this or force-reload all clients send_user_email_update_event(user_profile) UserProfile.objects.bulk_update(user_profiles, ['email']) for user_profile in user_profiles: flush_user_profile(sender=UserProfile, instance=user_profile) def do_set_realm_authentication_methods(realm: Realm, authentication_methods: Dict[str, bool], acting_user: Optional[UserProfile]=None) -> None: old_value = realm.authentication_methods_dict() for key, value in list(authentication_methods.items()): index = getattr(realm.authentication_methods, key).number realm.authentication_methods.set_bit(index, int(value)) realm.save(update_fields=['authentication_methods']) updated_value = realm.authentication_methods_dict() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=timezone_now(), acting_user=acting_user, extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: {'property': 'authentication_methods', 'value': old_value}, RealmAuditLog.NEW_VALUE: {'property': 'authentication_methods', 'value': updated_value} })) event = dict( type="realm", op="update_dict", property='default', data=dict(authentication_methods=updated_value), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_message_editing(realm: Realm, allow_message_editing: bool, message_content_edit_limit_seconds: int, allow_community_topic_editing: bool) -> None: realm.allow_message_editing = allow_message_editing realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds realm.allow_community_topic_editing = allow_community_topic_editing realm.save(update_fields=['allow_message_editing', 'allow_community_topic_editing', 'message_content_edit_limit_seconds', ], ) event = dict( type="realm", op="update_dict", property="default", data=dict(allow_message_editing=allow_message_editing, message_content_edit_limit_seconds=message_content_edit_limit_seconds, allow_community_topic_editing=allow_community_topic_editing), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_message_deleting(realm: Realm, message_content_delete_limit_seconds: int) -> None: realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds realm.save(update_fields=['message_content_delete_limit_seconds']) event = dict( type="realm", op="update_dict", property="default", data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None: realm.notifications_stream = stream realm.save(update_fields=['notifications_stream']) event = dict( type="realm", op="update", property="notifications_stream_id", value=stream_id, ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_signup_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None: realm.signup_notifications_stream = stream realm.save(update_fields=['signup_notifications_stream']) event = dict( type="realm", op="update", property="signup_notifications_stream_id", value=stream_id, ) send_event(realm, event, active_user_ids(realm.id)) def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None: """ Deactivate this realm. 
Do NOT deactivate the users -- we need to be able to tell the difference between users that were intentionally deactivated, e.g. by a realm admin, and users who can't currently use Zulip because their realm has been deactivated. """ if realm.deactivated: return realm.deactivated = True realm.save(update_fields=["deactivated"]) if settings.BILLING_ENABLED: downgrade_now(realm) event_time = timezone_now() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time, acting_user=acting_user, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm), })) ScheduledEmail.objects.filter(realm=realm).delete() for user in active_humans_in_realm(realm): # Don't deactivate the users, but do delete their sessions so they get # bumped to the login screen, where they'll get a realm deactivation # notice when they try to log in. delete_user_sessions(user) event = dict(type="realm", op="deactivated", realm_id=realm.id) send_event(realm, event, active_user_ids(realm.id)) def do_reactivate_realm(realm: Realm) -> None: realm.deactivated = False realm.save(update_fields=["deactivated"]) event_time = timezone_now() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm), })) def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None: realm.string_id = new_subdomain realm.save(update_fields=["string_id"]) def do_scrub_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None: users = UserProfile.objects.filter(realm=realm) for user in users: do_delete_messages_by_sender(user) do_delete_avatar_image(user, acting_user=acting_user) user.full_name = f"Scrubbed {generate_key()[:15]}" scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}" user.email = scrubbed_email user.delivery_email = scrubbed_email user.save(update_fields=["full_name", "email", "delivery_email"]) do_remove_realm_custom_profile_fields(realm) Attachment.objects.filter(realm=realm).delete() RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(), acting_user=acting_user, event_type=RealmAuditLog.REALM_SCRUBBED) def do_deactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None, _cascade: bool=True) -> None: if not user_profile.is_active: return if user_profile.realm.is_zephyr_mirror_realm: # nocoverage # For zephyr mirror users, we need to make them a mirror dummy # again; otherwise, other users won't get the correct behavior # when trying to send messages to this person inside Zulip. # # Ideally, we need to also ensure their zephyr mirroring bot # isn't running, but that's a separate issue. 
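        # (Illustrative note: making the user a mirror dummy again mirrors
        # how create_mirror_user_if_needed, further down in this file,
        # forges such users in the first place with is_mirror_dummy=True.)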
        user_profile.is_mirror_dummy = True
    user_profile.is_active = False
    user_profile.save(update_fields=["is_active", "is_mirror_dummy"])

    delete_user_sessions(user_profile)
    clear_scheduled_emails([user_profile.id])

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time, increment=-1)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)

    event = dict(type="realm_user", op="remove",
                 person=dict(user_id=user_profile.id,
                             full_name=user_profile.full_name))
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))

    if user_profile.is_bot:
        event = dict(type="realm_bot", op="remove",
                     bot=dict(user_id=user_profile.id,
                              full_name=user_profile.full_name))
        send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))

    if _cascade:
        bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
                                                  bot_owner=user_profile)
        for profile in bot_profiles:
            do_deactivate_user(profile, acting_user=acting_user, _cascade=False)

def do_deactivate_stream(stream: Stream, log: bool=True,
                         acting_user: Optional[UserProfile]=None) -> None:
    # Get the affected user ids *before* we deactivate everybody.
    affected_user_ids = can_access_stream_user_ids(stream)

    get_active_subscriptions_for_stream_id(stream.id).update(active=False)

    was_invite_only = stream.invite_only
    stream.deactivated = True
    stream.invite_only = True
    # Preserve as much as possible the original stream name while giving it a
    # special prefix that both indicates that the stream is deactivated and
    # frees up the original name for reuse.
    old_name = stream.name
    new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
    for i in range(20):
        if stream_name_in_use(new_name, stream.realm_id):
            # This stream has already been deactivated; keep prepending !s until
            # we have a unique stream name or we've hit the rename limit.
            new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
        else:
            break
    # If we don't have a unique name at this point, this will fail later in the
    # code path.

    stream.name = new_name[:Stream.MAX_NAME_LENGTH]
    stream.save(update_fields=['name', 'deactivated', 'invite_only'])

    # If this is a default stream, remove it, properly sending a
    # notification to browser clients.
    if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
        do_remove_default_stream(stream)

    default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
    for group in default_stream_groups_for_stream:
        do_remove_streams_from_default_stream_group(stream.realm, group, [stream])

    # Remove the old stream information from remote cache.
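    # (Hedged sketch: get_stream_cache_key below is keyed on the stream
    # name plus realm id, so the entry stored under the pre-rename name
    # must be deleted explicitly; presumably the save() above only
    # refreshes data keyed on the new name.)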
old_cache_key = get_stream_cache_key(old_name, stream.realm_id) cache_delete(old_cache_key) stream_dict = stream.to_dict() stream_dict.update(dict(name=old_name, invite_only=was_invite_only)) event = dict(type="stream", op="delete", streams=[stream_dict]) send_event(stream.realm, event, affected_user_ids) event_time = timezone_now() RealmAuditLog.objects.create(realm=stream.realm, acting_user=acting_user, modified_stream=stream, event_type=RealmAuditLog.STREAM_DEACTIVATED, event_time=event_time) def send_user_email_update_event(user_profile: UserProfile) -> None: payload = dict(user_id=user_profile.id, new_email=user_profile.email) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None: delete_user_profile_caches([user_profile]) user_profile.delivery_email = new_email if user_profile.email_address_is_realm_public(): user_profile.email = new_email user_profile.save(update_fields=["email", "delivery_email"]) else: user_profile.save(update_fields=["delivery_email"]) # We notify just the target user (and eventually org admins, only # when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS) # about their new delivery email, since that field is private. payload = dict(user_id=user_profile.id, delivery_email=new_email) event = dict(type='realm_user', op='update', person=payload) send_event(user_profile.realm, event, [user_profile.id]) if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR: # If the user is using Gravatar to manage their email address, # their Gravatar just changed, and we need to notify other # clients. notify_avatar_url_change(user_profile) if user_profile.email_address_is_realm_public(): # Additionally, if we're also changing the publicly visible # email, we send a new_email event as well. 
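        # Illustrative summary of the two events in this function: the
        # private delivery_email update above goes only to the target
        # user, while this public email update fans out to the whole
        # realm (see send_user_email_update_event, which uses
        # active_user_ids).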
send_user_email_update_event(user_profile) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED, event_time=event_time) def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None: old_email = user_profile.delivery_email obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email, user_profile=user_profile, realm=user_profile.realm) activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE) from zerver.context_processors import common_context context = common_context(user_profile) context.update({ 'old_email': old_email, 'new_email': new_email, 'activate_url': activation_url, }) language = user_profile.default_language send_email('zerver/emails/confirm_new_email', to_emails=[new_email], from_name=FromAddress.security_email_from_name(language=language), from_address=FromAddress.tokenized_no_reply_address(), language=language, context=context, realm=user_profile.realm) def compute_irc_user_fullname(email: str) -> str: return email.split("@")[0] + " (IRC)" def compute_jabber_user_fullname(email: str) -> str: return email.split("@")[0] + " (XMPP)" @cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email), timeout=3600*24*7) def create_mirror_user_if_needed(realm: Realm, email: str, email_to_fullname: Callable[[str], str]) -> UserProfile: try: return get_user_by_delivery_email(email, realm) except UserProfile.DoesNotExist: try: # Forge a user for this person return create_user( email=email, password=None, realm=realm, full_name=email_to_fullname(email), active=False, is_mirror_dummy=True, ) except IntegrityError: return get_user_by_delivery_email(email, realm) def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None: welcome_bot = get_system_bot(settings.WELCOME_BOT) human_recipient_id = message['message'].sender.recipient_id if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2: content = ( _("Congratulations on your first reply!") + " " ":tada:" "\n" "\n" + _("Feel free to continue using this space to practice your new messaging " "skills. 
Or, try clicking on some of the stream names to your left!") ) internal_send_private_message( message['realm'], welcome_bot, message['message'].sender, content) def render_incoming_message(message: Message, content: str, user_ids: Set[int], realm: Realm, mention_data: Optional[MentionData]=None, email_gateway: bool=False) -> str: realm_alert_words_automaton = get_alert_word_automaton(realm) try: rendered_content = render_markdown( message=message, content=content, realm=realm, realm_alert_words_automaton = realm_alert_words_automaton, mention_data=mention_data, email_gateway=email_gateway, ) except MarkdownRenderingException: raise JsonableError(_('Unable to render message')) return rendered_content class RecipientInfoResult(TypedDict): active_user_ids: Set[int] push_notify_user_ids: Set[int] stream_email_user_ids: Set[int] stream_push_user_ids: Set[int] wildcard_mention_user_ids: Set[int] um_eligible_user_ids: Set[int] long_term_idle_user_ids: Set[int] default_bot_user_ids: Set[int] service_bot_tuples: List[Tuple[int, int]] def get_recipient_info(recipient: Recipient, sender_id: int, stream_topic: Optional[StreamTopicTarget], possibly_mentioned_user_ids: AbstractSet[int]=set(), possible_wildcard_mention: bool=True) -> RecipientInfoResult: stream_push_user_ids: Set[int] = set() stream_email_user_ids: Set[int] = set() wildcard_mention_user_ids: Set[int] = set() if recipient.type == Recipient.PERSONAL: # The sender and recipient may be the same id, so # de-duplicate using a set. message_to_user_ids = list({recipient.type_id, sender_id}) assert(len(message_to_user_ids) in [1, 2]) elif recipient.type == Recipient.STREAM: # Anybody calling us w/r/t a stream message needs to supply # stream_topic. We may eventually want to have different versions # of this function for different message types. assert(stream_topic is not None) user_ids_muting_topic = stream_topic.user_ids_muting_topic() subscription_rows = stream_topic.get_active_subscriptions().annotate( user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'), user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'), user_profile_wildcard_mentions_notify=F( 'user_profile__wildcard_mentions_notify'), ).values( 'user_profile_id', 'push_notifications', 'email_notifications', 'wildcard_mentions_notify', 'user_profile_email_notifications', 'user_profile_push_notifications', 'user_profile_wildcard_mentions_notify', 'is_muted', ).order_by('user_profile_id') message_to_user_ids = [ row['user_profile_id'] for row in subscription_rows ] def should_send(setting: str, row: Dict[str, Any]) -> bool: # This implements the structure that the UserProfile stream notification settings # are defaults, which can be overridden by the stream-level settings (if those # values are not null). 
            if row['is_muted']:
                return False
            if row['user_profile_id'] in user_ids_muting_topic:
                return False
            if row[setting] is not None:
                return row[setting]
            return row['user_profile_' + setting]

        stream_push_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_push_notify
            if should_send('push_notifications', row)
        }

        stream_email_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_email_notify
            if should_send('email_notifications', row)
        }

        if possible_wildcard_mention:
            # If there's a possible wildcard mention, we need to
            # determine which users would receive a wildcard mention
            # notification for this message should the message indeed
            # contain a wildcard mention.
            #
            # We don't have separate values for push/email
            # notifications here; at this stage, we're just
            # determining whether this wildcard mention should be
            # treated as a mention (and follow the user's mention
            # notification preferences) or a normal message.
            wildcard_mention_user_ids = {
                row['user_profile_id']
                for row in subscription_rows
                if should_send("wildcard_mentions_notify", row)
            }

    elif recipient.type == Recipient.HUDDLE:
        message_to_user_ids = get_huddle_user_ids(recipient)

    else:
        raise ValueError('Bad recipient type')

    message_to_user_id_set = set(message_to_user_ids)

    user_ids = set(message_to_user_id_set)
    # Important note: Because we haven't rendered markdown yet, we
    # don't yet know which of these possibly-mentioned users was
    # actually mentioned in the message (in other words, the
    # mention syntax might have been in a code block or otherwise
    # escaped).  `get_ids_for` will filter these extra user rows
    # for our data structures not related to bots.
    user_ids |= possibly_mentioned_user_ids

    if user_ids:
        query = UserProfile.objects.filter(
            is_active=True,
        ).values(
            'id',
            'enable_online_push_notifications',
            'is_bot',
            'bot_type',
            'long_term_idle',
        )

        # query_for_ids is highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages)
        query = query_for_ids(
            query=query,
            user_ids=sorted(user_ids),
            field='id',
        )
        rows = list(query)
    else:
        # TODO: We should always have at least one user_id as a recipient
        # of any message we send.  Right now the exception to this
        # rule is `notify_new_user`, which, at least in a possibly
        # contrived test scenario, can attempt to send messages
        # to an inactive bot.  When we plug that hole, we can avoid
        # this `else` clause and just `assert(user_ids)`.
        #
        # UPDATE: It's February 2020 (and a couple years after the above
        # comment was written).  We have simplified notify_new_user
        # so that it should be a little easier to reason about.
        # There is currently some cleanup to how we handle cross
        # realm bots that is still under development.  Once that
        # effort is complete, we should be able to address this
        # to-do.
        rows = []

    def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
        """Only includes users on the explicit message to line"""
        return {
            row['id']
            for row in rows
            if f(row)
        } & message_to_user_id_set

    def is_service_bot(row: Dict[str, Any]) -> bool:
        return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)

    active_user_ids = get_ids_for(lambda r: True)
    push_notify_user_ids = get_ids_for(
        lambda r: r['enable_online_push_notifications'],
    )

    # Service bots don't get UserMessage rows.
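    # Worked example for get_ids_for (illustrative ids): if rows covers
    # user ids {1, 2, 3} but message_to_user_id_set == {2, 3}, then
    # get_ids_for(f) can only return a subset of {2, 3}; id 1 was
    # fetched solely because it was possibly mentioned.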
um_eligible_user_ids = get_ids_for( lambda r: not is_service_bot(r), ) long_term_idle_user_ids = get_ids_for( lambda r: r['long_term_idle'], ) # These two bot data structures need to filter from the full set # of users who either are receiving the message or might have been # mentioned in it, and so can't use get_ids_for. # # Further in the do_send_messages code path, once # `mentioned_user_ids` has been computed via markdown, we'll filter # these data structures for just those users who are either a # direct recipient or were mentioned; for now, we're just making # sure we have the data we need for that without extra database # queries. default_bot_user_ids = { row['id'] for row in rows if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT } service_bot_tuples = [ (row['id'], row['bot_type']) for row in rows if is_service_bot(row) ] info: RecipientInfoResult = dict( active_user_ids=active_user_ids, push_notify_user_ids=push_notify_user_ids, stream_push_user_ids=stream_push_user_ids, stream_email_user_ids=stream_email_user_ids, wildcard_mention_user_ids=wildcard_mention_user_ids, um_eligible_user_ids=um_eligible_user_ids, long_term_idle_user_ids=long_term_idle_user_ids, default_bot_user_ids=default_bot_user_ids, service_bot_tuples=service_bot_tuples, ) return info def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]], mentioned_user_ids: Set[int], active_user_ids: Set[int], recipient_type: int) -> Dict[str, List[Dict[str, Any]]]: event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list) # Avoid infinite loops by preventing messages sent by bots from generating # Service events. if sender.is_bot: return event_dict def maybe_add_event(user_profile_id: int, bot_type: int) -> None: if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: queue_name = 'outgoing_webhooks' elif bot_type == UserProfile.EMBEDDED_BOT: queue_name = 'embedded_bots' else: logging.error( 'Unexpected bot_type for Service bot id=%s: %s', user_profile_id, bot_type, ) return is_stream = (recipient_type == Recipient.STREAM) # Important note: service_bot_tuples may contain service bots # who were not actually mentioned in the message (e.g. if # mention syntax for that bot appeared in a code block). # Thus, it is important to filter any users who aren't part of # either mentioned_user_ids (the actual mentioned users) or # active_user_ids (the actual recipients). # # So even though this is implied by the logic below, we filter # these not-actually-mentioned users here, to help keep this # function future-proof. 
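        # Worked example (illustrative): a stream message mentioning
        # @**outgoing-bot** puts that bot's id in mentioned_user_ids, so
        # the trigger below is 'mention'; a PM sent directly to the bot
        # puts its id in active_user_ids instead, so the trigger is
        # 'private_message'.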
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids: return # Mention triggers, for stream messages if is_stream and user_profile_id in mentioned_user_ids: trigger = 'mention' # PM triggers for personal and huddle messages elif (not is_stream) and (user_profile_id in active_user_ids): trigger = 'private_message' else: return event_dict[queue_name].append({ 'trigger': trigger, 'user_profile_id': user_profile_id, }) for user_profile_id, bot_type in service_bot_tuples: maybe_add_event( user_profile_id=user_profile_id, bot_type=bot_type, ) return event_dict def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]: scheduled_messages: List[ScheduledMessage] = [] for message in messages: scheduled_message = ScheduledMessage() scheduled_message.sender = message['message'].sender scheduled_message.recipient = message['message'].recipient topic_name = message['message'].topic_name() scheduled_message.set_topic_name(topic_name=topic_name) scheduled_message.content = message['message'].content scheduled_message.sending_client = message['message'].sending_client scheduled_message.stream = message['stream'] scheduled_message.realm = message['realm'] scheduled_message.scheduled_timestamp = message['deliver_at'] if message['delivery_type'] == 'send_later': scheduled_message.delivery_type = ScheduledMessage.SEND_LATER elif message['delivery_type'] == 'remind': scheduled_message.delivery_type = ScheduledMessage.REMIND scheduled_messages.append(scheduled_message) ScheduledMessage.objects.bulk_create(scheduled_messages) return [scheduled_message.id for scheduled_message in scheduled_messages] def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]], email_gateway: bool=False, mark_as_read: Sequence[int]=[]) -> List[int]: """See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html for high-level documentation on this subsystem. 
""" # Filter out messages which didn't pass internal_prep_message properly messages = [message for message in messages_maybe_none if message is not None] # Filter out zephyr mirror anomalies where the message was already sent already_sent_ids: List[int] = [] new_messages: List[MutableMapping[str, Any]] = [] for message in messages: if isinstance(message['message'], int): already_sent_ids.append(message['message']) else: new_messages.append(message) messages = new_messages links_for_embed: Set[str] = set() # For consistency, changes to the default values for these gets should also be applied # to the default args in do_send_message for message in messages: message['rendered_content'] = message.get('rendered_content', None) message['stream'] = message.get('stream', None) message['local_id'] = message.get('local_id', None) message['sender_queue_id'] = message.get('sender_queue_id', None) message['realm'] = message.get('realm', message['message'].sender.realm) mention_data = MentionData( realm_id=message['realm'].id, content=message['message'].content, ) message['mention_data'] = mention_data if message['message'].is_stream_message(): stream_id = message['message'].recipient.type_id stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget( stream_id=stream_id, topic_name=message['message'].topic_name(), ) else: stream_topic = None info = get_recipient_info( recipient=message['message'].recipient, sender_id=message['message'].sender_id, stream_topic=stream_topic, possibly_mentioned_user_ids=mention_data.get_user_ids(), possible_wildcard_mention=mention_data.message_has_wildcards(), ) message['active_user_ids'] = info['active_user_ids'] message['push_notify_user_ids'] = info['push_notify_user_ids'] message['stream_push_user_ids'] = info['stream_push_user_ids'] message['stream_email_user_ids'] = info['stream_email_user_ids'] message['um_eligible_user_ids'] = info['um_eligible_user_ids'] message['long_term_idle_user_ids'] = info['long_term_idle_user_ids'] message['default_bot_user_ids'] = info['default_bot_user_ids'] message['service_bot_tuples'] = info['service_bot_tuples'] # Render our messages. assert message['message'].rendered_content is None rendered_content = render_incoming_message( message['message'], message['message'].content, message['active_user_ids'], message['realm'], mention_data=message['mention_data'], email_gateway=email_gateway, ) message['message'].rendered_content = rendered_content message['message'].rendered_content_version = markdown_version links_for_embed |= message['message'].links_for_preview # Add members of the mentioned user groups into `mentions_user_ids`. for group_id in message['message'].mentions_user_group_ids: members = message['mention_data'].get_group_members(group_id) message['message'].mentions_user_ids.update(members) # Only send data to Tornado about wildcard mentions if message # rendering determined the message had an actual wildcard # mention in it (and not e.g. wildcard mention syntax inside a # code block). if message['message'].mentions_wildcard: message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids'] else: message['wildcard_mention_user_ids'] = [] ''' Once we have the actual list of mentioned ids from message rendering, we can patch in "default bots" (aka normal bots) who were directly mentioned in this message as eligible to get UserMessage rows. 
''' mentioned_user_ids = message['message'].mentions_user_ids default_bot_user_ids = message['default_bot_user_ids'] mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids message['um_eligible_user_ids'] |= mentioned_bot_user_ids # Save the message receipts in the database user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict) with transaction.atomic(): Message.objects.bulk_create([message['message'] for message in messages]) # Claim attachments in message for message in messages: if do_claim_attachments(message['message'], message['message'].potential_attachment_path_ids): message['message'].has_attachment = True message['message'].save(update_fields=['has_attachment']) ums: List[UserMessageLite] = [] for message in messages: # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows; # they will be processed later. mentioned_user_ids = message['message'].mentions_user_ids user_messages = create_user_messages( message=message['message'], um_eligible_user_ids=message['um_eligible_user_ids'], long_term_idle_user_ids=message['long_term_idle_user_ids'], stream_push_user_ids = message['stream_push_user_ids'], stream_email_user_ids = message['stream_email_user_ids'], mentioned_user_ids=mentioned_user_ids, mark_as_read=mark_as_read, ) for um in user_messages: user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list() ums.extend(user_messages) message['message'].service_queue_events = get_service_bot_events( sender=message['message'].sender, service_bot_tuples=message['service_bot_tuples'], mentioned_user_ids=mentioned_user_ids, active_user_ids=message['active_user_ids'], recipient_type=message['message'].recipient.type, ) bulk_insert_ums(ums) for message in messages: do_widget_post_save_actions(message) for message in messages: realm_id: Optional[int] = None if message['message'].is_stream_message(): if message['stream'] is None: stream_id = message['message'].recipient.type_id message['stream'] = Stream.objects.select_related().get(id=stream_id) assert message['stream'] is not None # assert needed because stubs for django are missing realm_id = message['stream'].realm_id # Deliver events to the real-time push system, as well as # enqueuing any additional processing triggered by the message. wide_message_dict = MessageDict.wide_dict(message['message'], realm_id) user_flags = user_message_flags.get(message['message'].id, {}) sender = message['message'].sender message_type = wide_message_dict['type'] presence_idle_user_ids = get_active_presence_idle_user_ids( realm=sender.realm, sender_id=sender.id, message_type=message_type, active_user_ids=message['active_user_ids'], user_flags=user_flags, ) event = dict( type='message', message=message['message'].id, message_dict=wide_message_dict, presence_idle_user_ids=presence_idle_user_ids, ) ''' TODO: We may want to limit user_ids to only those users who have UserMessage rows, if only for minor performance reasons. For now we queue events for all subscribers/sendees of the message, since downstream code may still do notifications that don't require UserMessage rows. Our automated tests have gotten better on this codepath, but we may have coverage gaps, so we should be careful about changing the next line. 
''' user_ids = message['active_user_ids'] | set(user_flags.keys()) users = [ dict( id=user_id, flags=user_flags.get(user_id, []), always_push_notify=(user_id in message['push_notify_user_ids']), stream_push_notify=(user_id in message['stream_push_user_ids']), stream_email_notify=(user_id in message['stream_email_user_ids']), wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']), ) for user_id in user_ids ] if message['message'].is_stream_message(): # Note: This is where authorization for single-stream # get_updates happens! We only attach stream data to the # notify new_message request if it's a public stream, # ensuring that in the tornado server, non-public stream # messages are only associated to their subscribed users. assert message['stream'] is not None # assert needed because stubs for django are missing if message['stream'].is_public(): event['realm_id'] = message['stream'].realm_id event['stream_name'] = message['stream'].name if message['stream'].invite_only: event['invite_only'] = True if message['stream'].first_message_id is None: message['stream'].first_message_id = message['message'].id message['stream'].save(update_fields=["first_message_id"]) if message['local_id'] is not None: event['local_id'] = message['local_id'] if message['sender_queue_id'] is not None: event['sender_queue_id'] = message['sender_queue_id'] send_event(message['realm'], event, users) if links_for_embed: event_data = { 'message_id': message['message'].id, 'message_content': message['message'].content, 'message_realm_id': message['realm'].id, 'urls': links_for_embed} queue_json_publish('embed_links', event_data) if message['message'].recipient.type == Recipient.PERSONAL: welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id if (welcome_bot_id in message['active_user_ids'] and welcome_bot_id != message['message'].sender_id): send_welcome_bot_response(message) for queue_name, events in message['message'].service_queue_events.items(): for event in events: queue_json_publish( queue_name, { "message": wide_message_dict, "trigger": event['trigger'], "user_profile_id": event["user_profile_id"], }, ) # Note that this does not preserve the order of message ids # returned. In practice, this shouldn't matter, as we only # mirror single zephyr messages at a time and don't otherwise # intermingle sending zephyr messages with other messages. return already_sent_ids + [message['message'].id for message in messages] class UserMessageLite: ''' The Django ORM is too slow for bulk operations. This class is optimized for the simple use case of inserting a bunch of rows into zerver_usermessage. 
''' def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None: self.user_profile_id = user_profile_id self.message_id = message_id self.flags = flags def flags_list(self) -> List[str]: return UserMessage.flags_list_for_flags(self.flags) def create_user_messages(message: Message, um_eligible_user_ids: AbstractSet[int], long_term_idle_user_ids: AbstractSet[int], stream_push_user_ids: AbstractSet[int], stream_email_user_ids: AbstractSet[int], mentioned_user_ids: AbstractSet[int], mark_as_read: Sequence[int] = []) -> List[UserMessageLite]: ums_to_create = [] for user_profile_id in um_eligible_user_ids: um = UserMessageLite( user_profile_id=user_profile_id, message_id=message.id, flags=0, ) ums_to_create.append(um) # These properties on the Message are set via # render_markdown by code in the markdown inline patterns wildcard = message.mentions_wildcard ids_with_alert_words = message.user_ids_with_alert_words for um in ums_to_create: if (um.user_profile_id == message.sender.id and message.sent_by_human()) or \ um.user_profile_id in mark_as_read: um.flags |= UserMessage.flags.read if wildcard: um.flags |= UserMessage.flags.wildcard_mentioned if um.user_profile_id in mentioned_user_ids: um.flags |= UserMessage.flags.mentioned if um.user_profile_id in ids_with_alert_words: um.flags |= UserMessage.flags.has_alert_word if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]: um.flags |= UserMessage.flags.is_private # For long_term_idle (aka soft-deactivated) users, we are allowed # to optimize by lazily not creating UserMessage rows that would # have the default 0 flag set (since the soft-reactivation logic # knows how to create those when the user comes back). We need to # create the UserMessage rows for these long_term_idle users # non-lazily in a few cases: # # * There are nonzero flags (e.g. the user was mentioned), since # that case is rare and this saves a lot of complexity in # soft-reactivation. # # * If the user is going to be notified (e.g. they get push/email # notifications for every message on a stream), since in that # case the notifications code will call `access_message` on the # message to re-verify permissions, and for private streams, # will get an error if the UserMessage row doesn't exist yet. # # See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation # for details on this system. user_messages = [] for um in ums_to_create: if (um.user_profile_id in long_term_idle_user_ids and um.user_profile_id not in stream_push_user_ids and um.user_profile_id not in stream_email_user_ids and message.is_stream_message() and int(um.flags) == 0): continue user_messages.append(um) return user_messages def bulk_insert_ums(ums: List[UserMessageLite]) -> None: ''' Doing bulk inserts this way is much faster than using Django, since we don't have any ORM overhead. Profiling with 1000 users shows a speedup of 0.436 -> 0.027 seconds, so we're talking about a 15x speedup. 
    '''
    if not ums:
        return

    vals = [
        (um.user_profile_id, um.message_id, um.flags)
        for um in ums
    ]
    query = SQL('''
        INSERT into zerver_usermessage (user_profile_id, message_id, flags)
        VALUES %s
    ''')
    with connection.cursor() as cursor:
        execute_values(cursor.cursor, query, vals)

def do_add_submessage(realm: Realm,
                      sender_id: int,
                      message_id: int,
                      msg_type: str,
                      content: str,
                      ) -> None:
    submessage = SubMessage(
        sender_id=sender_id,
        message_id=message_id,
        msg_type=msg_type,
        content=content,
    )
    submessage.save()

    event = dict(
        type="submessage",
        msg_type=msg_type,
        message_id=message_id,
        submessage_id=submessage.id,
        sender_id=sender_id,
        content=content,
    )
    ums = UserMessage.objects.filter(message_id=message_id)
    target_user_ids = [um.user_profile_id for um in ums]

    send_event(realm, event, target_user_ids)

def notify_reaction_update(user_profile: UserProfile, message: Message,
                           reaction: Reaction, op: str) -> None:
    user_dict = {'user_id': user_profile.id,
                 'email': user_profile.email,
                 'full_name': user_profile.full_name}

    event: Dict[str, Any] = {
        'type': 'reaction',
        'op': op,
        'user_id': user_profile.id,
        # TODO: We plan to remove this redundant user_dict object once
        # clients are updated to support accessing it via user_id.  See
        # https://github.com/zulip/zulip/pull/14711 for details.
        'user': user_dict,
        'message_id': message.id,
        'emoji_name': reaction.emoji_name,
        'emoji_code': reaction.emoji_code,
        'reaction_type': reaction.reaction_type,
    }

    # Update the cached message, since a new reaction was added.
    update_to_dict_cache([message])

    # Recipients for message update events, including reactions, are
    # everyone who got the original message.  This means reactions
    # won't live-update in preview narrows, but it's the right
    # performance tradeoff, since otherwise we'd need to send all
    # reactions to public stream messages to every browser for every
    # client in the organization, which doesn't scale.
    #
    # However, to ensure that reactions do live-update for any user
    # who has actually participated in reacting to a message, we add a
    # "historical" UserMessage row for any user who reacts to a message,
    # subscribing them to future notifications.
    ums = UserMessage.objects.filter(message=message.id)
    send_event(user_profile.realm, event, [um.user_profile_id for um in ums])

def do_add_reaction(user_profile: UserProfile, message: Message,
                    emoji_name: str, emoji_code: str, reaction_type: str) -> None:
    reaction = Reaction(user_profile=user_profile, message=message,
                        emoji_name=emoji_name, emoji_code=emoji_code,
                        reaction_type=reaction_type)
    try:
        reaction.save()
    except django.db.utils.IntegrityError:  # nocoverage
        # This can happen when a race results in the check in views
        # code not catching an attempt to double-add a reaction, or
        # perhaps if the emoji_name/emoji_code mapping is busted.
raise JsonableError(_("Reaction already exists.")) notify_reaction_update(user_profile, message, reaction, "add") def do_remove_reaction(user_profile: UserProfile, message: Message, emoji_code: str, reaction_type: str) -> None: reaction = Reaction.objects.filter(user_profile=user_profile, message=message, emoji_code=emoji_code, reaction_type=reaction_type).get() reaction.delete() notify_reaction_update(user_profile, message, reaction, "remove") def do_send_typing_notification( realm: Realm, sender: UserProfile, recipient_user_profiles: List[UserProfile], operator: str) -> None: sender_dict = {'user_id': sender.id, 'email': sender.email} # Include a list of recipients in the event body to help identify where the typing is happening recipient_dicts = [{'user_id': profile.id, 'email': profile.email} for profile in recipient_user_profiles] event = dict( type='typing', op=operator, sender=sender_dict, recipients=recipient_dicts, ) # Only deliver the notification to active user recipients user_ids_to_notify = [ user.id for user in recipient_user_profiles if user.is_active ] send_event(realm, event, user_ids_to_notify) # check_send_typing_notification: # Checks the typing notification and sends it def check_send_typing_notification(sender: UserProfile, user_ids: List[int], operator: str) -> None: realm = sender.realm if len(user_ids) == 0: raise JsonableError(_('Missing parameter: \'to\' (recipient)')) elif operator not in ('start', 'stop'): raise JsonableError(_('Invalid \'op\' value (should be start or stop)')) ''' The next chunk of code will go away when we upgrade old mobile users away from versions of mobile that send emails. For the small number of very outdated mobile clients, we do double work here in terms of fetching users, but this structure reduces lots of other unnecessary duplicated code and will make it convenient to mostly delete code when we desupport old versions of the app.''' if sender.id not in user_ids: user_ids.append(sender.id) # If any of the user_ids being sent in are invalid, we will # just reject the whole request, since a partial list of user_ids # can create confusion related to huddles. Plus it's a good # sign that a client is confused (or possibly even malicious) if # we get bad user_ids. user_profiles = [] for user_id in user_ids: try: # We include cross-bot realms as possible recipients, # so that clients can know which huddle conversation # is relevant here. user_profile = get_user_by_id_in_realm_including_cross_realm( user_id, sender.realm) except UserProfile.DoesNotExist: raise JsonableError(_("Invalid user ID {}").format(user_id)) user_profiles.append(user_profile) do_send_typing_notification( realm=realm, sender=sender, recipient_user_profiles=user_profiles, operator=operator, ) def ensure_stream(realm: Realm, stream_name: str, invite_only: bool=False, stream_description: str="", acting_user: Optional[UserProfile]=None) -> Stream: return create_stream_if_needed(realm, stream_name, invite_only=invite_only, stream_description=stream_description, acting_user=acting_user)[0] def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile], forwarded_mirror_message: bool, forwarder_user_profile: Optional[UserProfile], sender: UserProfile) -> Recipient: # Avoid mutating the passed in list of recipient_profiles. 
recipient_profiles_map = {} for user_profile in recipient_profiles: recipient_profiles_map[user_profile.id] = user_profile if forwarded_mirror_message: # In our mirroring integrations with some third-party # protocols, bots subscribed to the third-party protocol # forward to Zulip messages that they received in the # third-party service. The permissions model for that # forwarding is that users can only submit to Zulip private # messages they personally received, and here we do the check # for whether forwarder_user_profile is among the private # message recipients of the message. assert forwarder_user_profile is not None if forwarder_user_profile.id not in recipient_profiles_map: raise ValidationError(_("User not authorized for this query")) # If the private message is just between the sender and # another person, force it to be a personal internally if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map): del recipient_profiles_map[sender.id] assert len(recipient_profiles_map) != 0 if len(recipient_profiles_map) == 1: user_profile = list(recipient_profiles_map.values())[0] return user_profile.recipient # Otherwise, we need a huddle. Make sure the sender is included in huddle messages recipient_profiles_map[sender.id] = sender user_ids: Set[int] = {user_id for user_id in recipient_profiles_map} return get_huddle_recipient(user_ids) def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile], sender: UserProfile, allow_deactivated: bool=False) -> Sequence[UserProfile]: recipient_profiles_map: Dict[int, UserProfile] = {} # We exempt cross-realm bots from the check that all the recipients # are in the same realm. realms = set() if not is_cross_realm_bot_email(sender.email): realms.add(sender.realm_id) for user_profile in user_profiles: if (not user_profile.is_active and not user_profile.is_mirror_dummy and not allow_deactivated) or user_profile.realm.deactivated: raise ValidationError(_("'{email}' is no longer using Zulip.").format(email=user_profile.email)) recipient_profiles_map[user_profile.id] = user_profile if not is_cross_realm_bot_email(user_profile.email): realms.add(user_profile.realm_id) if len(realms) > 1: raise ValidationError(_("You can't send private messages outside of your organization.")) return list(recipient_profiles_map.values()) def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool, forwarder_user_profile: Optional[UserProfile], sender: UserProfile, allow_deactivated: bool=False) -> Recipient: recipient_profiles = validate_recipient_user_profiles(user_profiles, sender, allow_deactivated=allow_deactivated) return get_recipient_from_user_profiles(recipient_profiles, forwarded_mirror_message, forwarder_user_profile, sender) def already_sent_mirrored_message_id(message: Message) -> Optional[int]: if message.recipient.type == Recipient.HUDDLE: # For huddle messages, we use a 10-second window because the # timestamps aren't guaranteed to actually match between two # copies of the same message. 
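        # Illustrative example: two mirrored copies of one huddle message
        # whose date_sent values differ by up to 10 seconds count as the
        # same message; for personal and stream messages the window below
        # is zero, i.e. the timestamps must match exactly.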
time_window = datetime.timedelta(seconds=10) else: time_window = datetime.timedelta(seconds=0) query = Message.objects.filter( sender=message.sender, recipient=message.recipient, content=message.content, sending_client=message.sending_client, date_sent__gte=message.date_sent - time_window, date_sent__lte=message.date_sent + time_window) messages = filter_by_exact_message_topic( query=query, message=message, ) if messages.exists(): return messages[0].id return None def extract_stream_indicator(s: str) -> Union[str, int]: # Users can pass stream name as either an id or a name, # and if they choose to pass a name, they may JSON encode # it for legacy reasons. try: data = ujson.loads(s) except (ValueError, TypeError): # If there was no JSON encoding, then we just # have a raw stream name. return s # We should stop supporting this odd use case # once we improve our documentation. if isinstance(data, list): if len(data) != 1: # nocoverage raise JsonableError(_("Expected exactly one stream")) data = data[0] if isinstance(data, str): # We had a JSON-encoded stream name. return data if isinstance(data, int): # We had a stream id. return data raise JsonableError(_("Invalid data type for stream")) def extract_private_recipients(s: str) -> Union[List[str], List[int]]: # We try to accept multiple incoming formats for recipients. # See test_extract_recipients() for examples of what we allow. try: data = ujson.loads(s) except (ValueError, TypeError): data = s if isinstance(data, str): data = data.split(',') if not isinstance(data, list): raise JsonableError(_("Invalid data type for recipients")) if not data: # We don't complain about empty message recipients here return data if isinstance(data[0], str): return get_validated_emails(data) if not isinstance(data[0], int): raise JsonableError(_("Invalid data type for recipients")) return get_validated_user_ids(data) def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]: for user_id in user_ids: if not isinstance(user_id, int): raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both.")) return list(set(user_ids)) def get_validated_emails(emails: Iterable[str]) -> List[str]: for email in emails: if not isinstance(email, str): raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both.")) return list(filter(bool, {email.strip() for email in emails})) def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str, topic: str, body: str, realm: Optional[Realm]=None) -> int: addressee = Addressee.for_stream_name(stream_name, topic) message = check_message(sender, client, addressee, body, realm) return do_send_messages([message])[0] def check_send_private_message(sender: UserProfile, client: Client, receiving_user: UserProfile, body: str) -> int: addressee = Addressee.for_user_profile(receiving_user) message = check_message(sender, client, addressee, body) return do_send_messages([message])[0] # check_send_message: # Returns the id of the sent message. Has same argspec as check_message. 
def check_send_message(sender: UserProfile, client: Client, message_type_name: str, message_to: Union[Sequence[int], Sequence[str]], topic_name: Optional[str], message_content: str, realm: Optional[Realm]=None, forged: bool=False, forged_timestamp: Optional[float]=None, forwarder_user_profile: Optional[UserProfile]=None, local_id: Optional[str]=None, sender_queue_id: Optional[str]=None, widget_content: Optional[str]=None) -> int: addressee = Addressee.legacy_build( sender, message_type_name, message_to, topic_name) message = check_message(sender, client, addressee, message_content, realm, forged, forged_timestamp, forwarder_user_profile, local_id, sender_queue_id, widget_content) return do_send_messages([message])[0] def check_schedule_message(sender: UserProfile, client: Client, message_type_name: str, message_to: Union[Sequence[str], Sequence[int]], topic_name: Optional[str], message_content: str, delivery_type: str, deliver_at: datetime.datetime, realm: Optional[Realm]=None, forwarder_user_profile: Optional[UserProfile]=None, ) -> int: addressee = Addressee.legacy_build( sender, message_type_name, message_to, topic_name) message = check_message(sender, client, addressee, message_content, realm=realm, forwarder_user_profile=forwarder_user_profile) message['deliver_at'] = deliver_at message['delivery_type'] = delivery_type recipient = message['message'].recipient if (delivery_type == 'remind' and (recipient.type != Recipient.STREAM and recipient.type_id != sender.id)): raise JsonableError(_("Reminders can only be set for streams.")) return do_schedule_messages([message])[0] def check_default_stream_group_name(group_name: str) -> None: if group_name.strip() == "": raise JsonableError(_("Invalid default stream group name '{}'").format(group_name)) if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH: raise JsonableError(_("Default stream group name too long (limit: {} characters)").format( DefaultStreamGroup.MAX_NAME_LENGTH, )) for i in group_name: if ord(i) == 0: raise JsonableError(_("Default stream group name '{}' contains NULL (0x00) characters.").format( group_name, )) def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile, realm: Realm, content: str) -> None: """ Sends a PM error notification to a bot's owner if one hasn't already been sent in the last 5 minutes. """ if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated: return if not sender.is_bot or sender.bot_owner is None: return # Don't send these notifications for cross-realm bot messages # (e.g. from EMAIL_GATEWAY_BOT) since the owner for # EMAIL_GATEWAY_BOT is probably the server administrator, not # the owner of the bot who could potentially fix the problem. if sender.realm != realm: return # We warn the user once every 5 minutes to avoid a flood of # PMs on a misconfigured integration, re-using the # UserProfile.last_reminder field, which is not used for bots. 
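    # Hedged sketch: given the 5-minute wait period described above, a
    # misconfigured bot hammering a missing stream produces at most one
    # owner notification per 5-minute window; last_reminder below
    # records the last send.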
last_reminder = sender.last_reminder waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD) if last_reminder and timezone_now() - last_reminder <= waitperiod: return internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT), sender.bot_owner, content) sender.last_reminder = timezone_now() sender.save(update_fields=['last_reminder']) def send_pm_if_empty_stream(stream: Optional[Stream], realm: Realm, sender: UserProfile, stream_name: Optional[str]=None, stream_id: Optional[int]=None) -> None: """If a bot sends a message to a stream that doesn't exist or has no subscribers, sends a notification to the bot owner (if not a cross-realm bot) so that the owner can correct the issue.""" if not sender.is_bot or sender.bot_owner is None: return arg_dict = { "bot_identity": f"`{sender.delivery_email}`", "stream_id": stream_id, "stream_name": f"#**{stream_name}**", "new_stream_link": "#streams/new", } if sender.bot_owner is not None: with override_language(sender.bot_owner.default_language): if stream is None: if stream_id is not None: content = _("Your bot {bot_identity} tried to send a message to stream ID " "{stream_id}, but there is no stream with that ID.").format(**arg_dict) else: assert(stream_name is not None) content = _("Your bot {bot_identity} tried to send a message to stream " "{stream_name}, but that stream does not exist. " "Click [here]({new_stream_link}) to create it.").format(**arg_dict) else: if num_subscribers_for_stream_id(stream.id) > 0: return content = _("Your bot {bot_identity} tried to send a message to " "stream {stream_name}. The stream exists but " "does not have any subscribers.").format(**arg_dict) send_rate_limited_pm_notification_to_bot_owner(sender, realm, content) def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm, sender: UserProfile) -> Stream: stream_name = stream_name.strip() check_stream_name(stream_name) try: stream = get_stream(stream_name, realm) send_pm_if_empty_stream(stream, realm, sender) except Stream.DoesNotExist: send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name) raise StreamDoesNotExistError(escape(stream_name)) return stream def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm, sender: UserProfile) -> Stream: try: stream = get_stream_by_id_in_realm(stream_id, realm) send_pm_if_empty_stream(stream, realm, sender) except Stream.DoesNotExist: send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id) raise StreamWithIDDoesNotExistError(stream_id) return stream def check_private_message_policy(realm: Realm, sender: UserProfile, user_profiles: Sequence[UserProfile]) -> None: if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED: if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot): # We allow PMs only between users and bots, to avoid # breaking the tutorial as well as automated # notifications from system bots to users. return raise JsonableError(_("Private messages are disabled in this organization.")) # check_message: # Returns message ready for sending with do_send_message on success or the error message (string) on error. 
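# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the throttling pattern used
# by send_rate_limited_pm_notification_to_bot_owner() above -- re-use a
# "last reminder" timestamp so at most one notification goes out per wait
# period.  The 5-minute window and the bare datetime arguments below are
# stand-ins for UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD and the
# last_reminder column; they are assumptions for this sketch only.
# ---------------------------------------------------------------------------
import datetime
from typing import Optional


def _sketch_should_notify(last_reminder: Optional[datetime.datetime],
                          now: datetime.datetime,
                          wait_minutes: int = 5) -> bool:
    waitperiod = datetime.timedelta(minutes=wait_minutes)
    if last_reminder and now - last_reminder <= waitperiod:
        # A notification went out recently; stay quiet.
        return False
    return True


_now = datetime.datetime(2020, 1, 1, 12, 0, 0)
assert _sketch_should_notify(None, _now)                                      # first failure: notify
assert not _sketch_should_notify(_now - datetime.timedelta(minutes=2), _now)  # within window: skip
assert _sketch_should_notify(_now - datetime.timedelta(minutes=6), _now)      # window elapsed: notify again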
def check_message(sender: UserProfile, client: Client, addressee: Addressee, message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False, forged_timestamp: Optional[float]=None, forwarder_user_profile: Optional[UserProfile]=None, local_id: Optional[str]=None, sender_queue_id: Optional[str]=None, widget_content: Optional[str]=None) -> Dict[str, Any]: """See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html for high-level documentation on this subsystem. """ stream = None message_content = message_content_raw.rstrip() if len(message_content) == 0: raise JsonableError(_("Message must not be empty")) if '\x00' in message_content: raise JsonableError(_("Message must not contain null bytes")) message_content = truncate_body(message_content) if realm is None: realm = sender.realm if addressee.is_stream(): topic_name = addressee.topic() topic_name = truncate_topic(topic_name) stream_name = addressee.stream_name() stream_id = addressee.stream_id() if stream_name is not None: stream = validate_stream_name_with_pm_notification(stream_name, realm, sender) elif stream_id is not None: stream = validate_stream_id_with_pm_notification(stream_id, realm, sender) else: stream = addressee.stream() assert stream is not None recipient = stream.recipient # This will raise JsonableError if there are problems. if sender.bot_type != sender.OUTGOING_WEBHOOK_BOT: access_stream_for_send_message( sender=sender, stream=stream, forwarder_user_profile=forwarder_user_profile) elif addressee.is_private(): user_profiles = addressee.user_profiles() mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"] check_private_message_policy(realm, sender, user_profiles) # API Super-users who set the `forged` flag are allowed to # forge messages sent by any user, so we disable the # `forwarded_mirror_message` security check in that case. forwarded_mirror_message = mirror_message and not forged try: recipient = recipient_for_user_profiles(user_profiles, forwarded_mirror_message, forwarder_user_profile, sender) except ValidationError as e: assert isinstance(e.messages[0], str) raise JsonableError(e.messages[0]) else: # This is defensive code--Addressee already validates # the message type. raise AssertionError("Invalid message type") message = Message() message.sender = sender message.content = message_content message.recipient = recipient if addressee.is_stream(): message.set_topic_name(topic_name) if forged and forged_timestamp is not None: # Forged messages come with a timestamp message.date_sent = timestamp_to_datetime(forged_timestamp) else: message.date_sent = timezone_now() message.sending_client = client # We render messages later in the process. 
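# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the content checks that
# check_message() applies above -- strip trailing whitespace, reject empty
# and NUL-containing content, and truncate overly long bodies.  The 10000
# character limit and the "[message truncated]" marker are placeholders for
# this sketch; the real limit and marker live in truncate_body().
# ---------------------------------------------------------------------------
_SKETCH_MAX_LEN = 10000


def _sketch_normalize_content(raw: str) -> str:
    content = raw.rstrip()
    if len(content) == 0:
        raise ValueError("Message must not be empty")
    if '\x00' in content:
        raise ValueError("Message must not contain null bytes")
    if len(content) > _SKETCH_MAX_LEN:
        content = content[:_SKETCH_MAX_LEN - len("\n[message truncated]")] + "\n[message truncated]"
    return content


assert _sketch_normalize_content("hello  \n") == "hello"
assert len(_sketch_normalize_content("x" * 20000)) == _SKETCH_MAX_LEN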
assert message.rendered_content is None if client.name == "zephyr_mirror": id = already_sent_mirrored_message_id(message) if id is not None: return {'message': id} if widget_content is not None: try: widget_content = ujson.loads(widget_content) except Exception: raise JsonableError(_('Widgets: API programmer sent invalid JSON content')) try: check_widget_content(widget_content) except ValidationError as error: raise JsonableError(_('Widgets: {error_msg}').format( error_msg=error.message, )) return {'message': message, 'stream': stream, 'local_id': local_id, 'sender_queue_id': sender_queue_id, 'realm': realm, 'widget_content': widget_content} def _internal_prep_message(realm: Realm, sender: UserProfile, addressee: Addressee, content: str) -> Optional[Dict[str, Any]]: """ Create a message object and checks it, but doesn't send it or save it to the database. The internal function that calls this can therefore batch send a bunch of created messages together as one database query. Call do_send_messages with a list of the return values of this method. """ # Remove any null bytes from the content if len(content) > MAX_MESSAGE_LENGTH: content = content[0:3900] + "\n\n[message was too long and has been truncated]" # If we have a stream name, and the stream doesn't exist, we # create it here (though this code path should probably be removed # eventually, moving that responsibility to the caller). If # addressee.stream_name() is None (i.e. we're sending to a stream # by ID), we skip this, as the stream object must already exist. if addressee.is_stream(): stream_name = addressee.stream_name() if stream_name is not None: ensure_stream(realm, stream_name, acting_user=sender) try: return check_message(sender, get_client("Internal"), addressee, content, realm=realm) except JsonableError as e: logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg) return None def internal_prep_stream_message( realm: Realm, sender: UserProfile, stream: Stream, topic: str, content: str, ) -> Optional[Dict[str, Any]]: """ See _internal_prep_message for details of how this works. """ addressee = Addressee.for_stream(stream, topic) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_prep_stream_message_by_name( realm: Realm, sender: UserProfile, stream_name: str, topic: str, content: str, ) -> Optional[Dict[str, Any]]: """ See _internal_prep_message for details of how this works. """ addressee = Addressee.for_stream_name(stream_name, topic) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_prep_private_message(realm: Realm, sender: UserProfile, recipient_user: UserProfile, content: str) -> Optional[Dict[str, Any]]: """ See _internal_prep_message for details of how this works. 
""" addressee = Addressee.for_user_profile(recipient_user) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_send_private_message(realm: Realm, sender: UserProfile, recipient_user: UserProfile, content: str) -> Optional[int]: message = internal_prep_private_message(realm, sender, recipient_user, content) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def internal_send_stream_message( realm: Realm, sender: UserProfile, stream: Stream, topic: str, content: str, email_gateway: bool=False) -> Optional[int]: message = internal_prep_stream_message( realm, sender, stream, topic, content, ) if message is None: return None message_ids = do_send_messages([message], email_gateway=email_gateway) return message_ids[0] def internal_send_stream_message_by_name( realm: Realm, sender: UserProfile, stream_name: str, topic: str, content: str, ) -> Optional[int]: message = internal_prep_stream_message_by_name( realm, sender, stream_name, topic, content, ) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str], content: str) -> Optional[int]: addressee = Addressee.for_private(emails, realm) message = _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str: # These colors are shared with the palette in subs.js. used_colors = [sub.color for sub in subs if sub.active] available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors] if available_colors: return available_colors[0] else: return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)] def validate_user_access_to_subscribers(user_profile: Optional[UserProfile], stream: Stream) -> None: """ Validates whether the user can view the subscribers of a stream. Raises a JsonableError if: * The user and the stream are in different realms * The realm is MIT and the stream is not invite only. * The stream is invite only, requesting_user is passed, and that user does not subscribe to the stream. """ validate_user_access_to_subscribers_helper( user_profile, {"realm_id": stream.realm_id, "invite_only": stream.invite_only}, # We use a lambda here so that we only compute whether the # user is subscribed if we have to lambda user_profile: subscribed_to_stream(user_profile, stream.id)) def validate_user_access_to_subscribers_helper( user_profile: Optional[UserProfile], stream_dict: Mapping[str, Any], check_user_subscribed: Callable[[UserProfile], bool], ) -> None: """Helper for validate_user_access_to_subscribers that doesn't require a full stream object. This function is a bit hard to read, because it is carefully optimized for performance in the two code paths we call it from: * In `bulk_get_subscriber_user_ids`, we already know whether the user was subscribed via `sub_dict`, and so we want to avoid a database query at all (especially since it calls this in a loop); * In `validate_user_access_to_subscribers`, we want to only check if the user is subscribed when we absolutely have to, since it costs a database query. The `check_user_subscribed` argument is a function that reports whether the user is subscribed to the stream. 
Note also that we raise a ValidationError in cases where the caller is doing the wrong thing (maybe these should be AssertionErrors), and JsonableError for 400 type errors. """ if user_profile is None: raise ValidationError("Missing user to validate access for") if user_profile.realm_id != stream_dict["realm_id"]: raise ValidationError("Requesting user not in given realm") # Guest users can access subscribed public stream's subscribers if user_profile.is_guest: if check_user_subscribed(user_profile): return # We could put an AssertionError here; in that we don't have # any code paths that would allow a guest user to access other # streams in the first place. if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]: raise JsonableError(_("Subscriber data is not available for this stream")) # Organization administrators can view subscribers for all streams. if user_profile.is_realm_admin: return if (stream_dict["invite_only"] and not check_user_subscribed(user_profile)): raise JsonableError(_("Unable to retrieve subscribers for private stream")) def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]], user_profile: UserProfile, sub_dict: Mapping[int, bool], stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]: """sub_dict maps stream_id => whether the user is subscribed to that stream.""" target_stream_dicts = [] for stream_dict in stream_dicts: stream_recipient.populate_with(stream_id=stream_dict["id"], recipient_id=stream_dict["recipient_id"]) try: validate_user_access_to_subscribers_helper( user_profile, stream_dict, lambda user_profile: sub_dict[stream_dict["id"]], ) except JsonableError: continue target_stream_dicts.append(stream_dict) stream_ids = [stream['id'] for stream in target_stream_dicts] recipient_ids = sorted([ stream_recipient.recipient_id_for(stream_id) for stream_id in stream_ids ]) result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts} if not recipient_ids: return result ''' The raw SQL below leads to more than a 2x speedup when tested with 20k+ total subscribers. (For large realms with lots of default streams, this function deals with LOTS of data, so it is important to optimize.) ''' query = SQL(''' SELECT zerver_subscription.recipient_id, zerver_subscription.user_profile_id FROM zerver_subscription INNER JOIN zerver_userprofile ON zerver_userprofile.id = zerver_subscription.user_profile_id WHERE zerver_subscription.recipient_id in %(recipient_ids)s AND zerver_subscription.active AND zerver_userprofile.is_active ORDER BY zerver_subscription.recipient_id, zerver_subscription.user_profile_id ''') cursor = connection.cursor() cursor.execute(query, {"recipient_ids": tuple(recipient_ids)}) rows = cursor.fetchall() cursor.close() recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict() ''' Using groupby/itemgetter here is important for performance, at scale. It makes it so that all interpreter overhead is just O(N) in nature. ''' for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)): user_profile_ids = [r[1] for r in recip_rows] stream_id = recip_to_stream_id[recip_id] result[stream_id] = list(user_profile_ids) return result def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet: # TODO: Make a generic stub for QuerySet """ Build a query to get the subscribers list for a stream, raising a JsonableError if: 'realm' is optional in stream. The caller can refine this query with select_related(), values(), etc. 
depending on whether it wants objects or just certain fields """ validate_user_access_to_subscribers(requesting_user, stream) # Note that non-active users may still have "active" subscriptions, because we # want to be able to easily reactivate them with their old subscriptions. This # is why the query here has to look at the UserProfile.is_active flag. subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter( user_profile__is_active=True, ) return subscriptions def get_subscriber_emails(stream: Stream, requesting_user: Optional[UserProfile]=None) -> List[str]: subscriptions_query = get_subscribers_query(stream, requesting_user) subscriptions = subscriptions_query.values('user_profile__email') return [subscription['user_profile__email'] for subscription in subscriptions] def notify_subscriptions_added(user_profile: UserProfile, sub_pairs: Iterable[Tuple[Subscription, Stream]], stream_user_ids: Callable[[Stream], List[int]], recent_traffic: Dict[int, int], no_log: bool=False) -> None: if not no_log: log_event({'type': 'subscription_added', 'user': user_profile.email, 'names': [stream.name for sub, stream in sub_pairs], 'realm': user_profile.realm.string_id}) sub_dicts = [] for (subscription, stream) in sub_pairs: sub_dict = stream.to_dict() for field_name in Subscription.API_FIELDS: if field_name == "active": # Skip the "active" field, it's implied by context continue sub_dict[field_name] = getattr(subscription, field_name) sub_dict['in_home_view'] = not subscription.is_muted sub_dict['email_address'] = encode_email_address(stream, show_sender=True) sub_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream.id, stream.date_created, recent_traffic) sub_dict['subscribers'] = stream_user_ids(stream) sub_dicts.append(sub_dict) # Send a notification to the user who subscribed. event = dict(type="subscription", op="add", subscriptions=sub_dicts) send_event(user_profile.realm, event, [user_profile.id]) def get_peer_user_ids_for_stream_change(stream: Stream, altered_user_ids: Iterable[int], subscribed_user_ids: Iterable[int]) -> Set[int]: ''' altered_user_ids is the user_ids that we are adding/removing subscribed_user_ids is the already-subscribed user_ids Based on stream policy, we notify the correct bystanders, while not notifying altered_users (who get subscribers via another event) ''' if stream.invite_only: # PRIVATE STREAMS # Realm admins can access all private stream subscribers. Send them an # event even if they aren't subscribed to stream. realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()] user_ids_to_notify = [] user_ids_to_notify.extend(realm_admin_ids) user_ids_to_notify.extend(subscribed_user_ids) return set(user_ids_to_notify) - set(altered_user_ids) else: # PUBLIC STREAMS # We now do "peer_add" or "peer_remove" events even for streams # users were never subscribed to, in order for the neversubscribed # structure to stay up-to-date. 
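# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the set arithmetic performed
# by get_peer_user_ids_for_stream_change() above.  For private streams the
# bystanders are realm admins plus existing subscribers; for public streams
# they are all non-guest users; in both cases the altered users themselves
# are excluded (they learn about the change through a different event).
# ---------------------------------------------------------------------------
from typing import Iterable, Set


def _sketch_peer_user_ids(invite_only: bool,
                          realm_admin_ids: Iterable[int],
                          all_non_guest_ids: Iterable[int],
                          subscribed_user_ids: Iterable[int],
                          altered_user_ids: Iterable[int]) -> Set[int]:
    if invite_only:
        return (set(realm_admin_ids) | set(subscribed_user_ids)) - set(altered_user_ids)
    return set(all_non_guest_ids) - set(altered_user_ids)


# Private stream: admins {1} and subscribers {2, 3} hear about user 3's change.
assert _sketch_peer_user_ids(True, [1], [1, 2, 3, 4], [2, 3], [3]) == {1, 2}
# Public stream: every non-guest except the altered user is notified.
assert _sketch_peer_user_ids(False, [1], [1, 2, 3, 4], [2, 3], [3]) == {1, 2, 4}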
return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids) def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]: stream_ids = [stream.id for stream in streams] all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter( user_profile__is_active=True, ).values( 'recipient__type_id', 'user_profile_id', ).order_by( 'recipient__type_id', ) get_stream_id = itemgetter('recipient__type_id') all_subscribers_by_stream: Dict[int, List[int]] = defaultdict(list) for stream_id, rows in itertools.groupby(all_subs, get_stream_id): user_ids = [row['user_profile_id'] for row in rows] all_subscribers_by_stream[stream_id] = user_ids return all_subscribers_by_stream def get_last_message_id() -> int: # We generally use this function to populate RealmAuditLog, and # the max id here is actually systemwide, not per-realm. I # assume there's some advantage in not filtering by realm. last_id = Message.objects.aggregate(Max('id'))['id__max'] if last_id is None: # During initial realm creation, there might be 0 messages in # the database; in that case, the `aggregate` query returns # None. Since we want an int for "beginning of time", use -1. last_id = -1 return last_id SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]] def bulk_add_subscriptions(streams: Iterable[Stream], users: Iterable[UserProfile], color_map: Mapping[str, str]={}, from_stream_creation: bool=False, acting_user: Optional[UserProfile]=None) -> SubT: users = list(users) recipients_map: Dict[int, int] = {stream.id: stream.recipient_id for stream in streams} recipient_ids: List[int] = [recipient_id for recipient_id in recipients_map.values()] stream_map: Dict[int, Stream] = {} for stream in streams: stream_map[recipients_map[stream.id]] = stream subs_by_user: Dict[int, List[Subscription]] = defaultdict(list) all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile') for sub in all_subs_query: subs_by_user[sub.user_profile_id].append(sub) realm = users[0].realm already_subscribed: List[Tuple[UserProfile, Stream]] = [] subs_to_activate: List[Tuple[Subscription, Stream]] = [] new_subs: List[Tuple[UserProfile, int, Stream]] = [] for user_profile in users: needs_new_sub: Set[int] = set(recipient_ids) for sub in subs_by_user[user_profile.id]: if sub.recipient_id in needs_new_sub: needs_new_sub.remove(sub.recipient_id) if sub.active: already_subscribed.append((user_profile, stream_map[sub.recipient_id])) else: subs_to_activate.append((sub, stream_map[sub.recipient_id])) # Mark the sub as active, without saving, so that # pick_color will consider this to be an active # subscription when picking colors sub.active = True for recipient_id in needs_new_sub: new_subs.append((user_profile, recipient_id, stream_map[recipient_id])) subs_to_add: List[Tuple[Subscription, Stream]] = [] for (user_profile, recipient_id, stream) in new_subs: if stream.name in color_map: color = color_map[stream.name] else: color = pick_color(user_profile, subs_by_user[user_profile.id]) sub_to_add = Subscription(user_profile=user_profile, active=True, color=color, recipient_id=recipient_id) subs_by_user[user_profile.id].append(sub_to_add) subs_to_add.append((sub_to_add, stream)) # TODO: XXX: This transaction really needs to be done at the serializeable # transaction isolation level. 
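# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): how bulk_add_subscriptions()
# above partitions the work for a single user -- recipients the user already
# has an active subscription to, inactive subscriptions to re-activate, and
# recipients needing a brand-new Subscription row.  Existing subscriptions
# are modelled here as a simple {recipient_id: active} mapping.
# ---------------------------------------------------------------------------
from typing import Dict, List, Set, Tuple


def _sketch_partition_subs(target_recipient_ids: Set[int],
                           existing_subs: Dict[int, bool]) -> Tuple[List[int], List[int], Set[int]]:
    already_subscribed: List[int] = []
    subs_to_activate: List[int] = []
    needs_new_sub = set(target_recipient_ids)
    for recipient_id, active in existing_subs.items():
        if recipient_id in needs_new_sub:
            needs_new_sub.remove(recipient_id)
            if active:
                already_subscribed.append(recipient_id)
            else:
                subs_to_activate.append(recipient_id)
    return already_subscribed, subs_to_activate, needs_new_sub


# Targets 10, 20, 30: active sub on 10, inactive sub on 20, nothing on 30.
assert _sketch_partition_subs({10, 20, 30}, {10: True, 20: False}) == ([10], [20], {30})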
with transaction.atomic(): occupied_streams_before = list(get_occupied_streams(realm)) Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add]) sub_ids = [sub.id for (sub, stream) in subs_to_activate] Subscription.objects.filter(id__in=sub_ids).update(active=True) occupied_streams_after = list(get_occupied_streams(realm)) # Log Subscription Activities in RealmAuditLog event_time = timezone_now() event_last_message_id = get_last_message_id() all_subscription_logs: (List[RealmAuditLog]) = [] for (sub, stream) in subs_to_add: all_subscription_logs.append(RealmAuditLog(realm=realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_CREATED, event_time=event_time)) for (sub, stream) in subs_to_activate: all_subscription_logs.append(RealmAuditLog(realm=realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED, event_time=event_time)) # Now since we have all log objects generated we can do a bulk insert RealmAuditLog.objects.bulk_create(all_subscription_logs) new_occupied_streams = [stream for stream in set(occupied_streams_after) - set(occupied_streams_before) if not stream.invite_only] if new_occupied_streams and not from_stream_creation: event: Dict[str, object] = dict( type="stream", op="occupy", streams=[stream.to_dict() for stream in new_occupied_streams], ) send_event(realm, event, active_user_ids(realm.id)) # Notify all existing users on streams that users have joined # First, get all users subscribed to the streams that we care about # We fetch all subscription information upfront, as it's used throughout # the following code and we want to minize DB queries all_subscribers_by_stream = get_user_ids_for_streams(streams=streams) def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]: if stream.is_in_zephyr_realm and not stream.invite_only: return [] user_ids = all_subscribers_by_stream[stream.id] return user_ids sub_tuples_by_user: Dict[int, List[Tuple[Subscription, Stream]]] = defaultdict(list) new_streams: Set[Tuple[int, int]] = set() for (sub, stream) in subs_to_add + subs_to_activate: sub_tuples_by_user[sub.user_profile.id].append((sub, stream)) new_streams.add((sub.user_profile.id, stream.id)) # We now send several types of events to notify browsers. The # first batch is notifications to users on invite-only streams # that the stream exists. for stream in streams: if not stream.is_public(): # Users newly added to invite-only streams # need a `create` notification. The former, because # they need the stream to exist before # they get the "subscribe" notification, and the latter so # they can manage the new stream. # Realm admins already have all created private streams. realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()] new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and user.id not in realm_admin_ids] send_stream_creation_event(stream, new_users_ids) stream_ids = {stream.id for stream in streams} recent_traffic = get_streams_traffic(stream_ids=stream_ids) # The second batch is events for the users themselves that they # were subscribed to the new streams. 
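# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the before/after diff used
# above to decide which streams became newly occupied by the bulk insert, so
# that a single "occupy" event can be sent for the public ones.  Streams are
# represented here as (stream_id, invite_only) tuples instead of ORM objects.
# ---------------------------------------------------------------------------
from typing import List, Set, Tuple

_Streamish = Tuple[int, bool]  # (stream_id, invite_only)


def _sketch_newly_occupied_public(before: Set[_Streamish], after: Set[_Streamish]) -> List[_Streamish]:
    return [stream for stream in after - before if not stream[1]]


_before = {(1, False)}
_after = {(1, False), (2, False), (3, True)}
# Stream 2 (public) triggers an "occupy" event; stream 3 is private and does not.
assert _sketch_newly_occupied_public(_before, _after) == [(2, False)]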
for user_profile in users: if len(sub_tuples_by_user[user_profile.id]) == 0: continue sub_pairs = sub_tuples_by_user[user_profile.id] notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids, recent_traffic) # The second batch is events for other users who are tracking the # subscribers lists of streams in their browser; everyone for # public streams and only existing subscribers for private streams. for stream in streams: if stream.is_in_zephyr_realm and not stream.invite_only: continue new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams] subscribed_user_ids = all_subscribers_by_stream[stream.id] peer_user_ids = get_peer_user_ids_for_stream_change( stream=stream, altered_user_ids=new_user_ids, subscribed_user_ids=subscribed_user_ids, ) if peer_user_ids: for new_user_id in new_user_ids: event = dict(type="subscription", op="peer_add", stream_id=stream.id, user_id=new_user_id) send_event(realm, event, peer_user_ids) return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] + [(sub.user_profile, stream) for (sub, stream) in subs_to_activate], already_subscribed) def get_available_notification_sounds() -> List[str]: notification_sounds_path = static_path('audio/notification_sounds') available_notification_sounds = [] for file_name in os.listdir(notification_sounds_path): root, ext = os.path.splitext(file_name) if '.' in root: # nocoverage # Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming) # to avoid spurious duplicates. continue if ext == '.ogg': available_notification_sounds.append(root) return available_notification_sounds def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream], no_log: bool=False) -> None: if not no_log: log_event({'type': 'subscription_removed', 'user': user_profile.email, 'names': [stream.name for stream in streams], 'realm': user_profile.realm.string_id}) payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams] event = dict(type="subscription", op="remove", subscriptions=payload) send_event(user_profile.realm, event, [user_profile.id]) SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]] def bulk_remove_subscriptions(users: Iterable[UserProfile], streams: Iterable[Stream], acting_client: Client, acting_user: Optional[UserProfile]=None) -> SubAndRemovedT: users = list(users) streams = list(streams) stream_dict = {stream.id: stream for stream in streams} existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict) def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]: stream_ids = {stream.id for stream in streams} not_subscribed: List[Tuple[UserProfile, Stream]] = [] for user_profile in users: user_sub_stream_info = existing_subs_by_user[user_profile.id] subscribed_stream_ids = { stream.id for (sub, stream) in user_sub_stream_info } not_subscribed_stream_ids = stream_ids - subscribed_stream_ids for stream_id in not_subscribed_stream_ids: stream = stream_dict[stream_id] not_subscribed.append((user_profile, stream)) return not_subscribed not_subscribed = get_non_subscribed_tups() subs_to_deactivate: List[Tuple[Subscription, Stream]] = [] sub_ids_to_deactivate: List[int] = [] # This loop just flattens out our data into big lists for # bulk operations. 
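# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the filename filtering done
# by get_available_notification_sounds() above -- keep the base name of each
# ``.ogg`` file and skip hash-named build artifacts such as
# ``zulip.abcd1234.ogg`` (their root still contains a dot).  A plain list of
# names stands in for the os.listdir() call.
# ---------------------------------------------------------------------------
import os
from typing import List


def _sketch_available_sounds(file_names: List[str]) -> List[str]:
    available = []
    for file_name in file_names:
        root, ext = os.path.splitext(file_name)
        if '.' in root:
            continue  # e.g. zulip.abcd1234.ogg, a hash-named duplicate
        if ext == '.ogg':
            available.append(root)
    return available


assert _sketch_available_sounds(['ding.ogg', 'zulip.abcd1234.ogg', 'README.md']) == ['ding']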
for tup_list in existing_subs_by_user.values(): for (sub, stream) in tup_list: subs_to_deactivate.append((sub, stream)) sub_ids_to_deactivate.append(sub.id) our_realm = users[0].realm # TODO: XXX: This transaction really needs to be done at the serializeable # transaction isolation level. with transaction.atomic(): occupied_streams_before = list(get_occupied_streams(our_realm)) Subscription.objects.filter( id__in=sub_ids_to_deactivate, ) .update(active=False) occupied_streams_after = list(get_occupied_streams(our_realm)) # Log Subscription Activities in RealmAuditLog event_time = timezone_now() event_last_message_id = get_last_message_id() all_subscription_logs: (List[RealmAuditLog]) = [] for (sub, stream) in subs_to_deactivate: all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED, event_time=event_time)) # Now since we have all log objects generated we can do a bulk insert RealmAuditLog.objects.bulk_create(all_subscription_logs) altered_user_dict: Dict[int, List[UserProfile]] = defaultdict(list) streams_by_user: Dict[int, List[Stream]] = defaultdict(list) for (sub, stream) in subs_to_deactivate: streams_by_user[sub.user_profile_id].append(stream) altered_user_dict[stream.id].append(sub.user_profile) for user_profile in users: if len(streams_by_user[user_profile.id]) == 0: continue notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id]) event = {'type': 'mark_stream_messages_as_read', 'client_id': acting_client.id, 'user_profile_id': user_profile.id, 'stream_ids': [stream.id for stream in streams]} queue_json_publish("deferred_work", event) all_subscribers_by_stream = get_user_ids_for_streams(streams=streams) def send_peer_remove_event(stream: Stream) -> None: if stream.is_in_zephyr_realm and not stream.invite_only: return altered_users = altered_user_dict[stream.id] altered_user_ids = [u.id for u in altered_users] subscribed_user_ids = all_subscribers_by_stream[stream.id] peer_user_ids = get_peer_user_ids_for_stream_change( stream=stream, altered_user_ids=altered_user_ids, subscribed_user_ids=subscribed_user_ids, ) if peer_user_ids: for removed_user in altered_users: event = dict(type="subscription", op="peer_remove", stream_id=stream.id, user_id=removed_user.id) send_event(our_realm, event, peer_user_ids) for stream in streams: send_peer_remove_event(stream=stream) new_vacant_streams = [stream for stream in set(occupied_streams_before) - set(occupied_streams_after)] new_vacant_private_streams = [stream for stream in new_vacant_streams if stream.invite_only] new_vacant_public_streams = [stream for stream in new_vacant_streams if not stream.invite_only] if new_vacant_public_streams: event = dict(type="stream", op="vacate", streams=[stream.to_dict() for stream in new_vacant_public_streams]) send_event(our_realm, event, active_user_ids(our_realm.id)) if new_vacant_private_streams: # Deactivate any newly-vacant private streams for stream in new_vacant_private_streams: do_deactivate_stream(stream, acting_user=acting_user) return ( [(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate], not_subscribed, ) def log_subscription_property_change(user_email: str, stream_name: str, property: str, value: Any) -> None: event = {'type': 'subscription_property', 'property': property, 'user': user_email, 'stream_name': stream_name, 'value': value} log_event(event) def 
do_change_subscription_property(user_profile: UserProfile, sub: Subscription, stream: Stream, property_name: str, value: Any, ) -> None: database_property_name = property_name event_property_name = property_name database_value = value event_value = value # For this property, is_muted is used in the database, but # in_home_view in the API, since we haven't migrated the events # API to the new name yet. if property_name == "in_home_view": database_property_name = "is_muted" database_value = not value if property_name == "is_muted": event_property_name = "in_home_view" event_value = not value setattr(sub, database_property_name, database_value) sub.save(update_fields=[database_property_name]) log_subscription_property_change(user_profile.email, stream.name, database_property_name, database_value) event = dict(type="subscription", op="update", email=user_profile.email, property=event_property_name, value=event_value, stream_id=stream.id, name=stream.name) send_event(user_profile.realm, event, [user_profile.id]) def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None: user_profile.set_password(password) if commit: user_profile.save(update_fields=["password"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED, event_time=event_time) def do_change_full_name(user_profile: UserProfile, full_name: str, acting_user: Optional[UserProfile]) -> None: old_name = user_profile.full_name user_profile.full_name = full_name user_profile.save(update_fields=["full_name"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED, event_time=event_time, extra_data=old_name) payload = dict(user_id=user_profile.id, full_name=user_profile.full_name) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=payload), bot_owner_user_ids(user_profile)) def check_change_full_name(user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile) -> str: """Verifies that the user's proposed full name is valid. The caller is responsible for checking check permissions. Returns the new full name, which may differ from what was passed in (because this function strips whitespace).""" new_full_name = check_full_name(full_name_raw) do_change_full_name(user_profile, new_full_name, acting_user) return new_full_name def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile) -> None: new_full_name = check_full_name(full_name_raw) if new_full_name == user_profile.full_name: # Our web app will try to patch full_name even if the user didn't # modify the name in the form. We just silently ignore those # situations. return check_bot_name_available( realm_id=user_profile.realm_id, full_name=new_full_name, ) do_change_full_name(user_profile, new_full_name, acting_user) def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile, acting_user: UserProfile) -> None: previous_owner = user_profile.bot_owner user_profile.bot_owner = bot_owner user_profile.save() # Can't use update_fields because of how the foreign key works. 
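# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the property-name/value
# translation performed by do_change_subscription_property() above.  The
# database stores ``is_muted`` while the events API still speaks
# ``in_home_view`` (its logical negation), so a single incoming update is
# mapped to one database write and one (possibly renamed/negated) event.
# ---------------------------------------------------------------------------
from typing import Any, Tuple


def _sketch_map_sub_property(property_name: str, value: Any) -> Tuple[str, Any, str, Any]:
    database_property_name, database_value = property_name, value
    event_property_name, event_value = property_name, value
    if property_name == "in_home_view":
        database_property_name, database_value = "is_muted", not value
    if property_name == "is_muted":
        event_property_name, event_value = "in_home_view", not value
    return database_property_name, database_value, event_property_name, event_value


# A legacy in_home_view=False update is stored as is_muted=True ...
assert _sketch_map_sub_property("in_home_view", False) == ("is_muted", True, "in_home_view", False)
# ... while an is_muted=True update is broadcast as in_home_view=False.
assert _sketch_map_sub_property("is_muted", True) == ("is_muted", True, "in_home_view", False)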
event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED, event_time=event_time) update_users = bot_owner_user_ids(user_profile) # For admins, update event is sent instead of delete/add # event. bot_data of admin contains all the # bots and none of them should be removed/(added again). # Delete the bot from previous owner's bot data. if previous_owner and not previous_owner.is_realm_admin: send_event(user_profile.realm, dict(type='realm_bot', op="delete", bot=dict( user_id=user_profile.id, )), {previous_owner.id}) # Do not send update event for previous bot owner. update_users = update_users - {previous_owner.id} # Notify the new owner that the bot has been added. if not bot_owner.is_realm_admin: add_event = created_bot_event(user_profile) send_event(user_profile.realm, add_event, {bot_owner.id}) # Do not send update event for bot_owner. update_users = update_users - {bot_owner.id} send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, owner_id=user_profile.bot_owner.id, )), update_users) # Since `bot_owner_id` is included in the user profile dict we need # to update the users dict with the new bot owner id event: Dict[str, Any] = dict( type="realm_user", op="update", person=dict( user_id=user_profile.id, bot_owner_id=user_profile.bot_owner.id, ), ) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None: user_profile.tos_version = tos_version user_profile.save(update_fields=["tos_version"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED, event_time=event_time) def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str: old_api_key = user_profile.api_key new_api_key = generate_api_key() user_profile.api_key = new_api_key user_profile.save(update_fields=["api_key"]) # We need to explicitly delete the old API key from our caches, # because the on-save handler for flushing the UserProfile object # in zerver/lib/cache.py only has access to the new API key. cache_delete(user_profile_by_api_key_cache_key(old_api_key)) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED, event_time=event_time) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, api_key=new_api_key, )), bot_owner_user_ids(user_profile)) event = {'type': 'clear_push_device_tokens', 'user_profile_id': user_profile.id} queue_json_publish("deferred_work", event) return new_api_key def notify_avatar_url_change(user_profile: UserProfile) -> None: if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, avatar_url=avatar_url(user_profile), )), bot_owner_user_ids(user_profile)) payload = dict( avatar_source=user_profile.avatar_source, avatar_url=avatar_url(user_profile), avatar_url_medium=avatar_url(user_profile, medium=True), avatar_version=user_profile.avatar_version, # Even clients using client_gravatar don't need the email, # since we're sending the URL anyway. 
user_id=user_profile.id, ) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str, skip_notify: bool=False, acting_user: Optional[UserProfile]=None) -> None: user_profile.avatar_source = avatar_source user_profile.avatar_version += 1 user_profile.save(update_fields=["avatar_source", "avatar_version"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile, event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED, extra_data={'avatar_source': avatar_source}, event_time=event_time, acting_user=acting_user) if not skip_notify: notify_avatar_url_change(user_profile) def do_delete_avatar_image(user: UserProfile, acting_user: Optional[UserProfile]=None) -> None: do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user) delete_avatar_image(user) def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None: realm.icon_source = icon_source realm.icon_version += 1 realm.save(update_fields=["icon_source", "icon_version"]) if log: log_event({'type': 'realm_change_icon', 'realm': realm.string_id, 'icon_source': icon_source}) send_event(realm, dict(type='realm', op='update_dict', property="icon", data=dict(icon_source=realm.icon_source, icon_url=realm_icon_url(realm))), active_user_ids(realm.id)) def do_change_logo_source(realm: Realm, logo_source: str, night: bool, acting_user: Optional[UserProfile]=None) -> None: if not night: realm.logo_source = logo_source realm.logo_version += 1 realm.save(update_fields=["logo_source", "logo_version"]) else: realm.night_logo_source = logo_source realm.night_logo_version += 1 realm.save(update_fields=["night_logo_source", "night_logo_version"]) RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED, realm=realm, event_time=timezone_now(), acting_user=acting_user) event = dict(type='realm', op='update_dict', property="night_logo" if night else "logo", data=get_realm_logo_data(realm, night)) send_event(realm, event, active_user_ids(realm.id)) def do_change_plan_type(realm: Realm, plan_type: int) -> None: old_value = realm.plan_type realm.plan_type = plan_type realm.save(update_fields=['plan_type']) RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED, realm=realm, event_time=timezone_now(), extra_data={'old_value': old_value, 'new_value': plan_type}) if plan_type == Realm.STANDARD: realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX realm.message_visibility_limit = None realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD elif plan_type == Realm.SELF_HOSTED: realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter. 
realm.message_visibility_limit = None realm.upload_quota_gb = None elif plan_type == Realm.STANDARD_FREE: realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX realm.message_visibility_limit = None realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD elif plan_type == Realm.LIMITED: realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED else: raise AssertionError("Invalid plan type") update_first_visible_message_id(realm) realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb']) event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type, 'extra_data': {'upload_quota': realm.upload_quota_bytes()}} send_event(realm, event, active_user_ids(realm.id)) def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream], log: bool=True) -> None: user_profile.default_sending_stream = stream user_profile.save(update_fields=['default_sending_stream']) if log: log_event({'type': 'user_change_default_sending_stream', 'user': user_profile.email, 'stream': str(stream)}) if user_profile.is_bot: if stream: stream_name: Optional[str] = stream.name else: stream_name = None send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_sending_stream=stream_name, )), bot_owner_user_ids(user_profile)) def do_change_default_events_register_stream(user_profile: UserProfile, stream: Optional[Stream], log: bool=True) -> None: user_profile.default_events_register_stream = stream user_profile.save(update_fields=['default_events_register_stream']) if log: log_event({'type': 'user_change_default_events_register_stream', 'user': user_profile.email, 'stream': str(stream)}) if user_profile.is_bot: if stream: stream_name: Optional[str] = stream.name else: stream_name = None send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_events_register_stream=stream_name, )), bot_owner_user_ids(user_profile)) def do_change_default_all_public_streams(user_profile: UserProfile, value: bool, log: bool=True) -> None: user_profile.default_all_public_streams = value user_profile.save(update_fields=['default_all_public_streams']) if log: log_event({'type': 'user_change_default_all_public_streams', 'user': user_profile.email, 'value': str(value)}) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_all_public_streams=user_profile.default_all_public_streams, )), bot_owner_user_ids(user_profile)) def do_change_user_role(user_profile: UserProfile, value: int, acting_user: Optional[UserProfile]=None) -> None: old_value = user_profile.role user_profile.role = value user_profile.save(update_fields=["role"]) RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(), extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: old_value, RealmAuditLog.NEW_VALUE: value, RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) event = dict(type="realm_user", op="update", person=dict(user_id=user_profile.id, role=user_profile.role)) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def do_change_is_api_super_user(user_profile: UserProfile, value: bool) -> None: user_profile.is_api_super_user = value 
user_profile.save(update_fields=["is_api_super_user"]) def do_change_stream_invite_only(stream: Stream, invite_only: bool, history_public_to_subscribers: Optional[bool]=None) -> None: history_public_to_subscribers = get_default_value_for_history_public_to_subscribers( stream.realm, invite_only, history_public_to_subscribers, ) stream.invite_only = invite_only stream.history_public_to_subscribers = history_public_to_subscribers stream.save(update_fields=['invite_only', 'history_public_to_subscribers']) event = dict( op="update", type="stream", property="invite_only", value=invite_only, history_public_to_subscribers=history_public_to_subscribers, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None: stream.is_web_public = is_web_public stream.save(update_fields=['is_web_public']) def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None: stream.stream_post_policy = stream_post_policy stream.save(update_fields=['stream_post_policy']) event = dict( op="update", type="stream", property="stream_post_policy", value=stream_post_policy, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) # Backwards-compatibility code: We removed the # is_announcement_only property in early 2020, but we send a # duplicate event for legacy mobile clients that might want the # data. event = dict( op="update", type="stream", property="is_announcement_only", value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_rename_stream(stream: Stream, new_name: str, user_profile: UserProfile, log: bool=True) -> Dict[str, str]: old_name = stream.name stream.name = new_name stream.save(update_fields=["name"]) if log: log_event({'type': 'stream_name_change', 'realm': stream.realm.string_id, 'new_name': new_name}) recipient_id = stream.recipient_id messages = Message.objects.filter(recipient_id=recipient_id).only("id") # Update the display recipient and stream, which are easy single # items to set. old_cache_key = get_stream_cache_key(old_name, stream.realm_id) new_cache_key = get_stream_cache_key(stream.name, stream.realm_id) if old_cache_key != new_cache_key: cache_delete(old_cache_key) cache_set(new_cache_key, stream) cache_set(display_recipient_cache_key(recipient_id), stream.name) # Delete cache entries for everything else, which is cheaper and # clearer than trying to set them. display_recipient is the out of # date field in all cases. cache_delete_many( to_dict_cache_key_id(message.id) for message in messages) new_email = encode_email_address(stream, show_sender=True) # We will tell our users to essentially # update stream.name = new_name where name = old_name # and update stream.email = new_email where name = old_name. # We could optimize this by trying to send one message, but the # client code really wants one property update at a time, and # updating stream names is a pretty infrequent operation. # More importantly, we want to key these updates by id, not name, # since id is the immutable primary key, and obviously name is not. 
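# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the pair of events built by
# do_change_stream_post_policy() above -- the modern ``stream_post_policy``
# update plus a legacy ``is_announcement_only`` boolean derived from it for
# older mobile clients.  STREAM_POST_POLICY_ADMINS is hard-coded to 2 here
# only for the sake of a standalone example; the real constant lives on the
# Stream model.
# ---------------------------------------------------------------------------
from typing import Any, Dict, List

_SKETCH_STREAM_POST_POLICY_ADMINS = 2  # placeholder for Stream.STREAM_POST_POLICY_ADMINS


def _sketch_post_policy_events(stream_id: int, name: str, stream_post_policy: int) -> List[Dict[str, Any]]:
    base = dict(op="update", type="stream", stream_id=stream_id, name=name)
    return [
        dict(base, property="stream_post_policy", value=stream_post_policy),
        dict(base, property="is_announcement_only",
             value=stream_post_policy == _SKETCH_STREAM_POST_POLICY_ADMINS),
    ]


_events = _sketch_post_policy_events(7, "design", _SKETCH_STREAM_POST_POLICY_ADMINS)
assert _events[1]["value"] is True  # legacy clients see the admins-only policy as announcement-only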
data_updates = [ ['email_address', new_email], ['name', new_name], ] for property, value in data_updates: event = dict( op="update", type="stream", property=property, value=value, stream_id=stream.id, name=old_name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) sender = get_system_bot(settings.NOTIFICATION_BOT) with override_language(stream.realm.default_language): internal_send_stream_message( stream.realm, sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, _('{user_name} renamed stream {old_stream_name} to {new_stream_name}.').format( user_name=f"@_**{user_profile.full_name}|{user_profile.id}**", old_stream_name=f"**{old_name}**", new_stream_name=f"**{new_name}**", ), ) # Even though the token doesn't change, the web client needs to update the # email forwarding address to display the correctly-escaped new name. return {"email_address": new_email} def do_change_stream_description(stream: Stream, new_description: str) -> None: stream.description = new_description stream.rendered_description = render_stream_description(new_description) stream.save(update_fields=['description', 'rendered_description']) event = dict( type='stream', op='update', property='description', name=stream.name, stream_id=stream.id, value=new_description, rendered_description=stream.rendered_description, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_change_stream_message_retention_days(stream: Stream, message_retention_days: Optional[int]=None) -> None: stream.message_retention_days = message_retention_days stream.save(update_fields=['message_retention_days']) event = dict( op="update", type="stream", property="message_retention_days", value=message_retention_days, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_create_realm(string_id: str, name: str, emails_restricted_to_domains: Optional[bool]=None) -> Realm: if Realm.objects.filter(string_id=string_id).exists(): raise AssertionError(f"Realm {string_id} already exists!") if not server_initialized(): logging.info("Server not yet initialized. Creating the internal realm first.") create_internal_realm() kwargs: Dict[str, Any] = {} if emails_restricted_to_domains is not None: kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains realm = Realm(string_id=string_id, name=name, **kwargs) realm.save() # Create stream once Realm object has been saved notifications_stream = ensure_stream( realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME, stream_description="Everyone is added to this stream by default. Welcome! :octopus:", acting_user=None) realm.notifications_stream = notifications_stream # With the current initial streams situation, the only public # stream is the notifications_stream. 
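# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the "one event per property"
# pattern used by do_rename_stream() above.  Rather than a single combined
# payload, each changed field (the new email address, then the new name) is
# sent as its own stream/update event, keyed by the immutable stream id and
# carrying the old name so clients can locate the stream being renamed.  The
# sample email address below is purely a placeholder.
# ---------------------------------------------------------------------------
from typing import Any, Dict, List


def _sketch_rename_events(stream_id: int, old_name: str,
                          new_name: str, new_email: str) -> List[Dict[str, Any]]:
    events = []
    for prop, value in [("email_address", new_email), ("name", new_name)]:
        events.append(dict(op="update", type="stream", property=prop,
                           value=value, stream_id=stream_id, name=old_name))
    return events


_evts = _sketch_rename_events(5, "old-name", "new-name", "new-name.abc123@streams.example.com")
assert [e["property"] for e in _evts] == ["email_address", "name"]
assert all(e["name"] == "old-name" for e in _evts)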
DefaultStream.objects.create(stream=notifications_stream, realm=realm) signup_notifications_stream = ensure_stream( realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True, stream_description="A private stream for core team members.", acting_user=None) realm.signup_notifications_stream = signup_notifications_stream realm.save(update_fields=['notifications_stream', 'signup_notifications_stream']) if settings.BILLING_ENABLED: do_change_plan_type(realm, Realm.LIMITED) # Log the event log_event({"type": "realm_created", "string_id": string_id, "emails_restricted_to_domains": emails_restricted_to_domains}) sender = get_system_bot(settings.NOTIFICATION_BOT) admin_realm = sender.realm # Send a notification to the admin realm with override_language(admin_realm.default_language): signup_message = _("Signups enabled") try: signups_stream = get_signups_stream(admin_realm) topic = realm.display_subdomain internal_send_stream_message( admin_realm, sender, signups_stream, topic, signup_message, ) except Stream.DoesNotExist: # nocoverage # If the signups stream hasn't been created in the admin # realm, don't auto-create it to send to it; just do nothing. pass return realm def do_change_notification_settings(user_profile: UserProfile, name: str, value: Union[bool, int, str], log: bool=True) -> None: """Takes in a UserProfile object, the name of a global notification preference to update, and the value to update to """ notification_setting_type = UserProfile.notification_setting_types[name] assert isinstance(value, notification_setting_type), ( f'Cannot update {name}: {value} is not an instance of {notification_setting_type}') setattr(user_profile, name, value) # Disabling digest emails should clear a user's email queue if name == 'enable_digest_emails' and not value: clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST) user_profile.save(update_fields=[name]) event = {'type': 'update_global_notifications', 'user': user_profile.email, 'notification_name': name, 'setting': value} if log: log_event(event) send_event(user_profile.realm, event, [user_profile.id]) def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None: user_profile.enter_sends = enter_sends user_profile.save(update_fields=["enter_sends"]) def do_set_user_display_setting(user_profile: UserProfile, setting_name: str, setting_value: Union[bool, str, int]) -> None: property_type = UserProfile.property_types[setting_name] assert isinstance(setting_value, property_type) setattr(user_profile, setting_name, setting_value) user_profile.save(update_fields=[setting_name]) event = {'type': 'update_display_settings', 'user': user_profile.email, 'setting_name': setting_name, 'setting': setting_value} if setting_name == "default_language": assert isinstance(setting_value, str) event['language_name'] = get_language_name(setting_value) send_event(user_profile.realm, event, [user_profile.id]) # Updates to the timezone display setting are sent to all users if setting_name == "timezone": payload = dict(email=user_profile.email, user_id=user_profile.id, timezone=user_profile.timezone) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def lookup_default_stream_groups(default_stream_group_names: List[str], realm: Realm) -> List[DefaultStreamGroup]: default_stream_groups = [] for group_name in default_stream_group_names: try: default_stream_group = DefaultStreamGroup.objects.get( name=group_name, realm=realm) except DefaultStreamGroup.DoesNotExist: raise 
JsonableError(_('Invalid default stream group {}').format(group_name)) default_stream_groups.append(default_stream_group) return default_stream_groups def notify_default_streams(realm: Realm) -> None: event = dict( type="default_streams", default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)), ) send_event(realm, event, active_non_guest_user_ids(realm.id)) def notify_default_stream_groups(realm: Realm) -> None: event = dict( type="default_stream_groups", default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm)), ) send_event(realm, event, active_non_guest_user_ids(realm.id)) def do_add_default_stream(stream: Stream) -> None: realm_id = stream.realm_id stream_id = stream.id if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists(): DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id) notify_default_streams(stream.realm) def do_remove_default_stream(stream: Stream) -> None: realm_id = stream.realm_id stream_id = stream.id DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete() notify_default_streams(stream.realm) def do_create_default_stream_group(realm: Realm, group_name: str, description: str, streams: List[Stream]) -> None: default_streams = get_default_streams_for_realm(realm.id) for stream in streams: if stream in default_streams: raise JsonableError(_( "'{stream_name}' is a default stream and cannot be added to '{group_name}'", ).format(stream_name=stream.name, group_name=group_name)) check_default_stream_group_name(group_name) (group, created) = DefaultStreamGroup.objects.get_or_create( name=group_name, realm=realm, description=description) if not created: raise JsonableError(_( "Default stream group '{group_name}' already exists", ).format(group_name=group_name)) group.streams.set(streams) notify_default_stream_groups(realm) def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup, streams: List[Stream]) -> None: default_streams = get_default_streams_for_realm(realm.id) for stream in streams: if stream in default_streams: raise JsonableError(_( "'{stream_name}' is a default stream and cannot be added to '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) if stream in group.streams.all(): raise JsonableError(_( "Stream '{stream_name}' is already present in default stream group '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) group.streams.add(stream) group.save() notify_default_stream_groups(realm) def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup, streams: List[Stream]) -> None: for stream in streams: if stream not in group.streams.all(): raise JsonableError(_( "Stream '{stream_name}' is not present in default stream group '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) group.streams.remove(stream) group.save() notify_default_stream_groups(realm) def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup, new_group_name: str) -> None: if group.name == new_group_name: raise JsonableError(_("This default stream group is already named '{}'").format(new_group_name)) if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists(): raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name)) group.name = new_group_name group.save() notify_default_stream_groups(realm) def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup, 
new_description: str) -> None: group.description = new_description group.save() notify_default_stream_groups(realm) def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None: group.delete() notify_default_stream_groups(realm) def get_default_streams_for_realm(realm_id: int) -> List[Stream]: return [default.stream for default in DefaultStream.objects.select_related().filter(realm_id=realm_id)] def get_default_subs(user_profile: UserProfile) -> List[Stream]: # Right now default streams are realm-wide. This wrapper gives us flexibility # to some day further customize how we set up default streams for new users. return get_default_streams_for_realm(user_profile.realm_id) # returns default streams in json serializeable format def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]: return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"]) def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]: return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"]) def do_update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None: effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH # This code isn't perfect, because with various races we might end # up creating two overlapping intervals, but that shouldn't happen # often, and can be corrected for in post-processing try: last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0] # There are two ways our intervals could overlap: # (1) The start of the new interval could be inside the old interval # (2) The end of the new interval could be inside the old interval # In either case, we just extend the old interval to include the new interval. if ((log_time <= last.end and log_time >= last.start) or (effective_end <= last.end and effective_end >= last.start)): last.end = max(last.end, effective_end) last.start = min(last.start, log_time) last.save(update_fields=["start", "end"]) return except IndexError: pass # Otherwise, the intervals don't overlap, so we should make a new one UserActivityInterval.objects.create(user_profile=user_profile, start=log_time, end=effective_end) @statsd_increment('user_activity') def do_update_user_activity(user_profile_id: int, client_id: int, query: str, count: int, log_time: datetime.datetime) -> None: (activity, created) = UserActivity.objects.get_or_create( user_profile_id = user_profile_id, client_id = client_id, query = query, defaults={'last_visit': log_time, 'count': count}) if not created: activity.count += count activity.last_visit = log_time activity.save(update_fields=["last_visit", "count"]) def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None: presence_dict = presence.to_dict() event = dict(type="presence", email=user_profile.email, user_id=user_profile.id, server_timestamp=time.time(), presence={presence_dict['client']: presence_dict}) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def consolidate_client(client: Client) -> Client: # The web app reports a client as 'website' # The desktop app reports a client as ZulipDesktop # due to it setting a custom user agent. 
    # We want both to count as web users
    # Alias ZulipDesktop to website
    if client.name in ['ZulipDesktop']:
        return get_client('website')
    else:
        return client

@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
                            client: Client,
                            log_time: datetime.datetime,
                            status: int) -> None:
    client = consolidate_client(client)
    defaults = dict(
        timestamp=log_time,
        status=status,
        realm_id=user_profile.realm_id,
    )

    (presence, created) = UserPresence.objects.get_or_create(
        user_profile = user_profile,
        client = client,
        defaults = defaults,
    )

    stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
    was_idle = presence.status == UserPresence.IDLE
    became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)

    # If an object was created, it has already been saved.
    #
    # We suppress changes from ACTIVE to IDLE before stale_status is reached;
    # this protects us from the user having two clients open: one active, the
    # other idle. Without this check, we would constantly toggle their status
    # between the two states.
    if not created and stale_status or was_idle or status == presence.status:
        # The following block attempts to only update the "status"
        # field in the event that it actually changed. This is
        # important to avoid flushing the UserPresence cache when the
        # data it would return to a client hasn't actually changed
        # (see the UserPresence post_save hook for details).
        presence.timestamp = log_time
        update_fields = ["timestamp"]
        if presence.status != status:
            presence.status = status
            update_fields.append("status")
        presence.save(update_fields=update_fields)

    if not user_profile.realm.presence_disabled and (created or became_online):
        # Push event to all users in the realm so they see the new user
        # appear in the presence list immediately, or the newly online
        # user without delay. Note that we won't send an update here for a
        # timestamp update, because we rely on the browser to ping us every 50
        # seconds for realm-wide status updates, and those updates should have
        # recent timestamps, which means the browser won't think active users
        # have gone idle. If we were more aggressive in this function about
        # sending timestamp updates, we could eliminate the ping responses, but
        # that's not a high priority for now, considering that most of our non-MIT
        # realms are pretty small.
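        # Illustrative sketch (values made up): the presence event built by
        # send_presence_changed() below looks roughly like
        #
        #     {'type': 'presence',
        #      'email': 'hamlet@zulip.com',
        #      'user_id': 10,
        #      'server_timestamp': 1592213040.25,
        #      'presence': {'website': {'client': 'website',
        #                               'status': 'active',
        #                               'timestamp': 1592213037}}}
        #
        # keyed by client name, so receivers can merge per-client statuses.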
send_presence_changed(user_profile, presence) def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None: event = {'user_profile_id': user_profile.id, 'time': datetime_to_timestamp(log_time)} queue_json_publish("user_activity_interval", event) def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int, new_user_input: bool) -> None: event = {'user_profile_id': user_profile.id, 'status': status, 'time': datetime_to_timestamp(log_time), 'client': client.name} queue_json_publish("user_presence", event) if new_user_input: update_user_activity_interval(user_profile, log_time) def do_update_user_status(user_profile: UserProfile, away: Optional[bool], status_text: Optional[str], client_id: int) -> None: if away: status = UserStatus.AWAY else: status = UserStatus.NORMAL realm = user_profile.realm update_user_status( user_profile_id=user_profile.id, status=status, status_text=status_text, client_id=client_id, ) event = dict( type='user_status', user_id=user_profile.id, ) if away is not None: event['away'] = away if status_text is not None: event['status_text'] = status_text send_event(realm, event, active_user_ids(realm.id)) def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int: log_statsd_event('bankruptcy') # First, we clear mobile push notifications. This is safer in the # event that the below logic times out and we're killed. all_push_message_ids = UserMessage.objects.filter( user_profile=user_profile, ).extra( where=[UserMessage.where_active_push_notification()], ).values_list("message_id", flat=True)[0:10000] do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids) msgs = UserMessage.objects.filter( user_profile=user_profile, ).extra( where=[UserMessage.where_unread()], ) count = msgs.update( flags=F('flags').bitor(UserMessage.flags.read), ) event = dict( type='update_message_flags', operation='add', flag='read', messages=[], # we don't send messages, since the client reloads anyway all=True, ) event_time = timezone_now() send_event(user_profile.realm, event, [user_profile.id]) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count def do_mark_stream_messages_as_read(user_profile: UserProfile, client: Client, stream: Stream, topic_name: Optional[str]=None) -> int: log_statsd_event('mark_stream_as_read') msgs = UserMessage.objects.filter( user_profile=user_profile, ) recipient = stream.recipient msgs = msgs.filter(message__recipient=recipient) if topic_name: msgs = filter_by_topic_name_via_message( query=msgs, topic_name=topic_name, ) msgs = msgs.extra( where=[UserMessage.where_unread()], ) message_ids = list(msgs.values_list('message__id', flat=True)) count = msgs.update( flags=F('flags').bitor(UserMessage.flags.read), ) event = dict( type='update_message_flags', operation='add', flag='read', messages=message_ids, all=False, ) event_time = timezone_now() send_event(user_profile.realm, event, [user_profile.id]) do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count def 
do_update_mobile_push_notification(message: Message,
                                   prior_mention_user_ids: Set[int],
                                   stream_push_user_ids: Set[int]) -> None:
    # Called during the message edit code path to remove mobile push
    # notifications for users who are no longer mentioned following
    # the edit. See #15428 for details.
    #
    # A perfect implementation would also support updating the message
    # in a sent notification if a message was edited to mention a
    # group rather than a user (or vice versa), though it is likely
    # not worth the effort to do such a change.
    if not message.is_stream_message():
        return

    remove_notify_users = prior_mention_user_ids - message.mentions_user_ids - stream_push_user_ids
    do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id])

def do_clear_mobile_push_notifications_for_ids(user_profile_ids: List[int],
                                               message_ids: List[int]) -> None:
    if len(message_ids) == 0:
        return

    # This function supports clearing notifications for several users
    # only for the message-edit use case where we'll have a single message_id.
    assert len(user_profile_ids) == 1 or len(message_ids) == 1

    messages_by_user = defaultdict(list)
    notifications_to_update = list(UserMessage.objects.filter(
        message_id__in=message_ids,
        user_profile_id__in=user_profile_ids,
    ).extra(
        where=[UserMessage.where_active_push_notification()],
    ).values_list('user_profile_id', 'message_id'))

    for (user_id, message_id) in notifications_to_update:
        messages_by_user[user_id].append(message_id)

    for (user_profile_id, event_message_ids) in messages_by_user.items():
        queue_json_publish("missedmessage_mobile_notifications", {
            "type": "remove",
            "user_profile_id": user_profile_id,
            "message_ids": event_message_ids,
        })

def do_update_message_flags(user_profile: UserProfile,
                            client: Client,
                            operation: str,
                            flag: str,
                            messages: List[int]) -> int:
    valid_flags = [item for item in UserMessage.flags
                   if item not in UserMessage.NON_API_FLAGS]
    if flag not in valid_flags:
        raise JsonableError(_("Invalid flag: '{}'").format(flag))
    if flag in UserMessage.NON_EDITABLE_FLAGS:
        raise JsonableError(_("Flag not editable: '{}'").format(flag))

    flagattr = getattr(UserMessage.flags, flag)

    msgs = UserMessage.objects.filter(user_profile=user_profile,
                                      message__id__in=messages)
    # This next block allows you to star any message, even those you
    # didn't receive (e.g. because you're looking at a public stream
    # you're not subscribed to, etc.). The problem is that starring
    # is a flag boolean on UserMessage, and UserMessage rows are
    # normally created only when you receive a message to support
    # searching your personal history. So we need to create one. We
    # add UserMessage.flags.historical, so that features that need
    # "messages you actually received" can exclude these UserMessages.
    if msgs.count() == 0:
        if not len(messages) == 1:
            raise JsonableError(_("Invalid message(s)"))
        if flag != "starred":
            raise JsonableError(_("Invalid message(s)"))

        # Validate that the user could have read the relevant message
        message = access_message(user_profile, messages[0])[0]

        # OK, this is a message that you legitimately have access
        # to via narrowing to the stream it is on, even though you
        # didn't actually receive it. So we create a historical,
        # read UserMessage message row for you to star.
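        # Sketch of the flag arithmetic for the row created below (bit
        # positions illustrative): flags is a bitfield, so
        #
        #     flags = UserMessage.flags.historical | UserMessage.flags.read
        #
        # yields a row that where_unread() filters out and that features
        # needing "messages you actually received" can exclude via the
        # historical bit, while the star itself is applied by the flags
        # update that follows.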
UserMessage.objects.create(user_profile=user_profile, message=message, flags=UserMessage.flags.historical | UserMessage.flags.read) if operation == 'add': count = msgs.update(flags=F('flags').bitor(flagattr)) elif operation == 'remove': count = msgs.update(flags=F('flags').bitand(~flagattr)) else: raise AssertionError("Invalid message flags operation") event = {'type': 'update_message_flags', 'operation': operation, 'flag': flag, 'messages': messages, 'all': False} send_event(user_profile.realm, event, [user_profile.id]) if flag == "read" and operation == "add": event_time = timezone_now() do_clear_mobile_push_notifications_for_ids([user_profile.id], messages) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count class MessageUpdateUserInfoResult(TypedDict): message_user_ids: Set[int] mention_user_ids: Set[int] def notify_topic_moved_streams(user_profile: UserProfile, old_stream: Stream, old_topic: str, new_stream: Stream, new_topic: Optional[str], send_notification_to_old_thread: bool, send_notification_to_new_thread: bool) -> None: # Since moving content between streams is highly disruptive, # it's worth adding a couple tombstone messages showing what # happened. sender = get_system_bot(settings.NOTIFICATION_BOT) if new_topic is None: new_topic = old_topic user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**" old_topic_link = f"#**{old_stream.name}>{old_topic}**" new_topic_link = f"#**{new_stream.name}>{new_topic}**" if send_notification_to_new_thread: with override_language(new_stream.realm.default_language): internal_send_stream_message( new_stream.realm, sender, new_stream, new_topic, _("This topic was moved here from {old_location} by {user}").format( old_location=old_topic_link, user=user_mention, ), ) if send_notification_to_old_thread: with override_language(old_stream.realm.default_language): # Send a notification to the old stream that the topic was moved. internal_send_stream_message( old_stream.realm, sender, old_stream, old_topic, _("This topic was moved by {user} to {new_location}").format( user=user_mention, new_location=new_topic_link, ), ) def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult: # We exclude UserMessage.flags.historical rows since those # users did not receive the message originally, and thus # probably are not relevant for reprocessed alert_words, # mentions and similar rendering features. This may be a # decision we change in the future. 
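    # Worked example of the mask check below (bit values illustrative):
    # with mentioned = 0b01 and wildcard_mentioned = 0b10, the mask is
    # 0b11, so a row with flags = 0b10 passes int(row['flags']) & mask
    # and its user_profile_id lands in mention_user_ids.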
query = UserMessage.objects.filter( message=message_id, flags=~UserMessage.flags.historical, ).values('user_profile_id', 'flags') rows = list(query) message_user_ids = { row['user_profile_id'] for row in rows } mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned mention_user_ids = { row['user_profile_id'] for row in rows if int(row['flags']) & mask } return dict( message_user_ids=message_user_ids, mention_user_ids=mention_user_ids, ) def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None: wildcard = message.mentions_wildcard mentioned_ids = message.mentions_user_ids ids_with_alert_words = message.user_ids_with_alert_words changed_ums: Set[UserMessage] = set() def update_flag(um: UserMessage, should_set: bool, flag: int) -> None: if should_set: if not (um.flags & flag): um.flags |= flag changed_ums.add(um) else: if (um.flags & flag): um.flags &= ~flag changed_ums.add(um) for um in ums: has_alert_word = um.user_profile_id in ids_with_alert_words update_flag(um, has_alert_word, UserMessage.flags.has_alert_word) mentioned = um.user_profile_id in mentioned_ids update_flag(um, mentioned, UserMessage.flags.mentioned) update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned) for um in changed_ums: um.save(update_fields=['flags']) def update_to_dict_cache(changed_messages: List[Message], realm_id: Optional[int]=None) -> List[int]: """Updates the message as stored in the to_dict cache (for serving messages).""" items_for_remote_cache = {} message_ids = [] changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id) for msg_id, msg in changed_messages_to_dict.items(): message_ids.append(msg_id) key = to_dict_cache_key_id(msg_id) items_for_remote_cache[key] = (msg,) cache_set_many(items_for_remote_cache) return message_ids # We use transaction.atomic to support select_for_update in the attachment codepath. @transaction.atomic def do_update_embedded_data(user_profile: UserProfile, message: Message, content: Optional[str], rendered_content: Optional[str]) -> None: event: Dict[str, Any] = { 'type': 'update_message', 'sender': user_profile.email, 'message_id': message.id} changed_messages = [message] ums = UserMessage.objects.filter(message=message.id) if content is not None: update_user_message_flags(message, ums) message.content = content message.rendered_content = rendered_content message.rendered_content_version = markdown_version event["content"] = content event["rendered_content"] = rendered_content message.save(update_fields=["content", "rendered_content"]) event['message_ids'] = update_to_dict_cache(changed_messages) def user_info(um: UserMessage) -> Dict[str, Any]: return { 'id': um.user_profile_id, 'flags': um.flags_list(), } send_event(user_profile.realm, event, list(map(user_info, ums))) class DeleteMessagesEvent(TypedDict, total=False): type: str message_ids: List[int] message_type: str sender_id: int recipient_id: int topic: str stream_id: int # We use transaction.atomic to support select_for_update in the attachment codepath. @transaction.atomic def do_update_message(user_profile: UserProfile, message: Message, new_stream: Optional[Stream], topic_name: Optional[str], propagate_mode: str, send_notification_to_old_thread: bool, send_notification_to_new_thread: bool, content: Optional[str], rendered_content: Optional[str], prior_mention_user_ids: Set[int], mention_user_ids: Set[int], mention_data: Optional[MentionData]=None) -> int: """ The main function for message editing. 
A message edit event can modify: * the message's content (in which case the caller will have set both content and rendered_content), * the topic, in which case the caller will have set topic_name * or both With topic edits, propagate_mode determines whether other message also have their topics edited. """ timestamp = timezone_now() message.last_edit_time = timestamp event: Dict[str, Any] = { 'type': 'update_message', 'user_id': user_profile.id, 'edit_timestamp': datetime_to_timestamp(timestamp), 'message_id': message.id, } edit_history_event: Dict[str, Any] = { 'user_id': user_profile.id, 'timestamp': event['edit_timestamp'], } changed_messages = [message] stream_being_edited = None if message.is_stream_message(): stream_id = message.recipient.type_id stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm) event['stream_name'] = stream_being_edited.name ums = UserMessage.objects.filter(message=message.id) if content is not None: assert rendered_content is not None # mention_data is required if there's a content edit. assert mention_data is not None # add data from group mentions to mentions_user_ids. for group_id in message.mentions_user_group_ids: members = mention_data.get_group_members(group_id) message.mentions_user_ids.update(members) update_user_message_flags(message, ums) # One could imagine checking realm.allow_edit_history here and # modifying the events based on that setting, but doing so # doesn't really make sense. We need to send the edit event # to clients regardless, and a client already had access to # the original/pre-edit content of the message anyway. That # setting must be enforced on the client side, and making a # change here simply complicates the logic for clients parsing # edit history events. event['orig_content'] = message.content event['orig_rendered_content'] = message.rendered_content edit_history_event["prev_content"] = message.content edit_history_event["prev_rendered_content"] = message.rendered_content edit_history_event["prev_rendered_content_version"] = message.rendered_content_version message.content = content message.rendered_content = rendered_content message.rendered_content_version = markdown_version event["content"] = content event["rendered_content"] = rendered_content event['prev_rendered_content_version'] = message.rendered_content_version event['is_me_message'] = Message.is_status_message(content, rendered_content) # message.has_image and message.has_link will have been # already updated by markdown rendering in the caller. 
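        # For reference, a single edit_history entry assembled above
        # serializes to something like (values illustrative):
        #
        #     {'user_id': 10,
        #      'timestamp': 1592213040,
        #      'prev_content': 'old text',
        #      'prev_rendered_content': '(rendered HTML of old text)',
        #      'prev_rendered_content_version': 1}
        #
        # New entries are prepended, so edit_history stays newest-first.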
        message.has_attachment = check_attachment_reference_change(message)

        if message.is_stream_message():
            if topic_name is not None:
                new_topic_name = topic_name
            else:
                new_topic_name = message.topic_name()

            stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
                stream_id=stream_id,
                topic_name=new_topic_name,
            )
        else:
            stream_topic = None

        info = get_recipient_info(
            recipient=message.recipient,
            sender_id=message.sender_id,
            stream_topic=stream_topic,
            possible_wildcard_mention=mention_data.message_has_wildcards(),
        )

        event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
        event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
        event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
        event['prior_mention_user_ids'] = list(prior_mention_user_ids)
        event['mention_user_ids'] = list(mention_user_ids)
        event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
        if message.mentions_wildcard:
            event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids'])
        else:
            event['wildcard_mention_user_ids'] = []

        do_update_mobile_push_notification(message, prior_mention_user_ids,
                                           info['stream_push_user_ids'])

    if topic_name is not None or new_stream is not None:
        orig_topic_name = message.topic_name()
        event["propagate_mode"] = propagate_mode
        event["stream_id"] = message.recipient.type_id

    if new_stream is not None:
        assert content is None
        assert message.is_stream_message()
        assert stream_being_edited is not None

        edit_history_event['prev_stream'] = stream_being_edited.id
        event[ORIG_TOPIC] = orig_topic_name
        message.recipient_id = new_stream.recipient_id

        event["new_stream_id"] = new_stream.id
        event["propagate_mode"] = propagate_mode

        # When messages are moved from one stream to another, some
        # users may lose access to those messages, including guest
        # users and users not subscribed to the new stream (if it is a
        # private stream). For those users, their experience is as
        # though the messages were deleted, and we should send a
        # delete_message event to them instead.
        subscribers = get_active_subscriptions_for_stream_id(
            stream_id).select_related("user_profile")
        subs_to_new_stream = list(get_active_subscriptions_for_stream_id(
            new_stream.id).select_related("user_profile"))

        new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream]

        # Get users who aren't subscribed to the new_stream.
        subs_losing_usermessages = [
            sub for sub in subscribers
            if sub.user_profile_id not in new_stream_sub_ids
        ]
        # Users who can no longer access the message without some action
        # from administrators.
        #
        # TODO: Extend this list to also contain users losing access
        # due to the messages moving to a private stream they are not
        # subscribed to.
        subs_losing_access = [
            sub for sub in subs_losing_usermessages
            if sub.user_profile.is_guest
        ]
        ums = ums.exclude(user_profile_id__in=[
            sub.user_profile_id for sub in subs_losing_usermessages])

    if topic_name is not None:
        topic_name = truncate_topic(topic_name)
        message.set_topic_name(topic_name)

        # These fields have legacy field names.
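        # Sketch of the resulting fields, assuming the legacy constants
        # keep their historical wire names (e.g. ORIG_TOPIC -> 'orig_subject',
        # TOPIC_NAME -> 'subject', from before the subject -> topic rename):
        #
        #     event['orig_subject'] = 'old topic'
        #     event['subject'] = 'new topic'
        #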
        event[ORIG_TOPIC] = orig_topic_name
        event[TOPIC_NAME] = topic_name
        event[TOPIC_LINKS] = topic_links(message.sender.realm_id, topic_name)
        edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name

    delete_event_notify_user_ids: List[int] = []
    if propagate_mode in ["change_later", "change_all"]:
        assert topic_name is not None or new_stream is not None
        messages_list = update_messages_for_topic_edit(
            message=message,
            propagate_mode=propagate_mode,
            orig_topic_name=orig_topic_name,
            topic_name=topic_name,
            new_stream=new_stream,
        )
        changed_messages += messages_list

        if new_stream is not None:
            assert stream_being_edited is not None
            message_ids = [msg.id for msg in changed_messages]
            # Delete UserMessage objects for users who will no
            # longer have access to these messages. Note: This could be
            # very expensive, since it's N guest users x M messages.
            UserMessage.objects.filter(
                user_profile_id__in=[sub.user_profile_id for sub in
                                     subs_losing_usermessages],
                message_id__in=message_ids,
            ).delete()

            delete_event: DeleteMessagesEvent = {
                'type': 'delete_message',
                'message_ids': message_ids,
                'message_type': 'stream',
                'stream_id': stream_being_edited.id,
                'topic': orig_topic_name,
            }
            delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access]
            send_event(user_profile.realm, delete_event, delete_event_notify_user_ids)

    if message.edit_history is not None:
        edit_history = ujson.loads(message.edit_history)
        edit_history.insert(0, edit_history_event)
    else:
        edit_history = [edit_history_event]
    message.edit_history = ujson.dumps(edit_history)

    # This does message.save(update_fields=[...])
    save_message_for_edit_use_case(message=message)

    realm_id: Optional[int] = None
    if stream_being_edited is not None:
        realm_id = stream_being_edited.realm_id

    event['message_ids'] = update_to_dict_cache(changed_messages, realm_id)

    def user_info(um: UserMessage) -> Dict[str, Any]:
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list(),
        }

    # The following block arranges that users who are subscribed to a
    # stream and can see history from before they subscribed get
    # live-update when old messages are edited (e.g. if the user does
    # a topic edit themself).
    #
    # We still don't send an update event to users who are not
    # subscribed to this stream and don't have a UserMessage row. This
    # means if a non-subscriber is viewing the narrow, they won't get
    # real-time updates. This is a balance between sending
    # message-edit notifications for every public stream to every user
    # in the organization (too expansive, and also not what we do for
    # newly sent messages anyway) and having magical live-updates
    # where possible.
    users_to_be_notified = list(map(user_info, ums))
    if stream_being_edited is not None:
        if stream_being_edited.is_history_public_to_subscribers:
            subscribers = get_active_subscriptions_for_stream_id(stream_id)
            # We exclude long-term idle users, since they by
            # definition have no active clients.
            subscribers = subscribers.exclude(user_profile__long_term_idle=True)
            # Remove duplicates by excluding the id of users already
            # in users_to_be_notified list.
            # This is the case where a user both has a UserMessage row
            # and is a current Subscriber.
            subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums])

            if new_stream is not None:
                assert delete_event_notify_user_ids is not None
                subscribers = subscribers.exclude(user_profile_id__in=delete_event_notify_user_ids)

            # All users that are subscribed to the stream must be
            # notified when a message is edited
            subscriber_ids = [user.user_profile_id for user in subscribers]

            if new_stream is not None:
                # TODO: Guest users don't see the new moved topic
                # unless breadcrumb message for new stream is
                # enabled. Excluding these users from receiving this
                # event helps us avoid an error traceback for our
                # clients. We should figure out a way to inform the
                # guest users of this new topic if sending a 'message'
                # event for these messages is not an option.
                #
                # Don't send this event to guest subs who are not
                # subscribers of the old stream but are subscribed to
                # the new stream; clients will be confused.
                old_stream_unsubbed_guests = [
                    sub for sub in subs_to_new_stream
                    if sub.user_profile.is_guest and sub.user_profile_id not in subscriber_ids
                ]
                subscribers = subscribers.exclude(user_profile_id__in=[
                    sub.user_profile_id for sub in old_stream_unsubbed_guests])
                subscriber_ids = [user.user_profile_id for user in subscribers]

            users_to_be_notified += list(map(subscriber_info, subscriber_ids))

    send_event(user_profile.realm, event, users_to_be_notified)

    if (len(changed_messages) > 0 and new_stream is not None and
            stream_being_edited is not None):
        # Notify users that the topic was moved.
        notify_topic_moved_streams(user_profile, stream_being_edited, orig_topic_name,
                                   new_stream, topic_name,
                                   send_notification_to_old_thread,
                                   send_notification_to_new_thread)

    return len(changed_messages)

def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
    # The messages in a delete_message event all belong to the same topic,
    # or it is a single private message, as any other behavior is not
    # possible with the current callers of this method.
    messages = list(messages)
    message_ids = [message.id for message in messages]
    if not message_ids:
        return

    event: DeleteMessagesEvent = {
        'type': 'delete_message',
        'message_ids': message_ids,
    }

    sample_message = messages[0]
    message_type = "stream"
    users_to_notify = []
    if not sample_message.is_stream_message():
        assert len(messages) == 1
        message_type = "private"
        ums = UserMessage.objects.filter(message_id__in=message_ids)
        users_to_notify = [um.user_profile_id for um in ums]
        # TODO: We should plan to remove `sender_id` here.
        event['recipient_id'] = sample_message.recipient_id
        event['sender_id'] = sample_message.sender_id
    archiving_chunk_size = retention.MESSAGE_BATCH_SIZE

    if message_type == "stream":
        stream_id = sample_message.recipient.type_id
        event['stream_id'] = stream_id
        event['topic'] = sample_message.topic_name()
        subscribers = get_active_subscriptions_for_stream_id(stream_id)
        # We exclude long-term idle users, since they by definition have no active clients.
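        # For reference, the two delete_message event shapes built by this
        # function look like (ids illustrative):
        #
        #     stream:  {'type': 'delete_message', 'message_ids': [42, 43],
        #               'message_type': 'stream', 'stream_id': 5,
        #               'topic': 'lunch'}
        #     private: {'type': 'delete_message', 'message_ids': [44],
        #               'message_type': 'private', 'recipient_id': 7,
        #               'sender_id': 10}
        #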
subscribers = subscribers.exclude(user_profile__long_term_idle=True) subscriber_ids = [user.user_profile_id for user in subscribers] users_to_notify = list(map(subscriber_info, subscriber_ids)) archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size) event['message_type'] = message_type send_event(realm, event, users_to_notify) def do_delete_messages_by_sender(user: UserProfile) -> None: message_ids = list(Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id')) if message_ids: move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE) def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]: stat = COUNT_STATS['messages_in_stream:is_bot:day'] traffic_from = timezone_now() - datetime.timedelta(days=28) query = StreamCount.objects.filter(property=stat.property, end_time__gt=traffic_from) query = query.filter(stream_id__in=stream_ids) traffic_list = query.values('stream_id').annotate(value=Sum('value')) traffic_dict = {} for traffic in traffic_list: traffic_dict[traffic["stream_id"]] = traffic["value"] return traffic_dict def round_to_2_significant_digits(number: int) -> int: return int(round(number, 2 - len(str(number)))) STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7 def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime, recent_traffic: Dict[int, int]) -> Optional[int]: try: stream_traffic = recent_traffic[stream_id] except KeyError: stream_traffic = 0 stream_age = (timezone_now() - stream_date_created).days if stream_age >= 28: average_weekly_traffic = int(stream_traffic // 4) elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS: average_weekly_traffic = int(stream_traffic * 7 // stream_age) else: return None if average_weekly_traffic == 0 and stream_traffic > 0: average_weekly_traffic = 1 return round_to_2_significant_digits(average_weekly_traffic) SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]] def get_web_public_subs(realm: Realm) -> SubHelperT: color_idx = 0 def get_next_color() -> str: nonlocal color_idx color = STREAM_ASSIGNMENT_COLORS[color_idx] color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS) return color subscribed = [] for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False): stream_dict = stream.to_dict() # Add versions of the Subscription fields based on a simulated # new user subscription set. stream_dict['is_muted'] = False stream_dict['color'] = get_next_color() stream_dict['desktop_notifications'] = True stream_dict['audible_notifications'] = True stream_dict['push_notifications'] = True stream_dict['email_notifications'] = True stream_dict['pin_to_top'] = False stream_weekly_traffic = get_average_weekly_stream_traffic(stream.id, stream.date_created, {}) stream_dict['stream_weekly_traffic'] = stream_weekly_traffic stream_dict['email_address'] = '' subscribed.append(stream_dict) return (subscribed, [], []) # In general, it's better to avoid using .values() because it makes # the code pretty ugly, but in this case, it has significant # performance impact for loading / for users with large numbers of # subscriptions, so it's worth optimizing. 
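# Illustrative sketch of the SubHelperT triple returned by the helper below
# (field set abbreviated; values made up):
#
#     subscribed, unsubscribed, never_subscribed = gather_subscriptions_helper(user_profile)
#     subscribed[0] -> {'stream_id': 5, 'name': 'design', 'color': '#76ce90',
#                       'is_muted': False, 'stream_weekly_traffic': 12, ...}
#
# All three lists hold stream dicts; which list a stream lands in encodes
# the user's relationship to it.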
def gather_subscriptions_helper(user_profile: UserProfile, include_subscribers: bool=True) -> SubHelperT: sub_dicts = get_stream_subscriptions_for_user(user_profile).values( *Subscription.API_FIELDS, "recipient_id").order_by("recipient_id") sub_dicts = list(sub_dicts) sub_recipient_ids = [ sub['recipient_id'] for sub in sub_dicts ] stream_recipient = StreamRecipientMap() stream_recipient.populate_for_recipient_ids(sub_recipient_ids) stream_ids: Set[int] = set() for sub in sub_dicts: sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id']) stream_ids.add(sub['stream_id']) recent_traffic = get_streams_traffic(stream_ids=stream_ids) all_streams = get_active_streams(user_profile.realm).select_related( "realm").values( *Stream.API_FIELDS, # date_created is used as an input for the stream_weekly_traffic computed field. "date_created", # The realm_id and recipient_id are generally not needed in the API. "realm_id", "recipient_id", # email_token isn't public to some users with access to # the stream, so doesn't belong in API_FIELDS. "email_token") stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids] stream_hash = {} for stream in stream_dicts: stream_hash[stream["id"]] = stream all_streams_id = [stream["id"] for stream in all_streams] subscribed = [] unsubscribed = [] never_subscribed = [] # Deactivated streams aren't in stream_hash. streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts if sub["stream_id"] in stream_hash] streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts} # Add never subscribed streams to streams_subscribed_map streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams}) if include_subscribers: subscriber_map: Mapping[int, Optional[List[int]]] = bulk_get_subscriber_user_ids( all_streams, user_profile, streams_subscribed_map, stream_recipient, ) else: # If we're not including subscribers, always return None, # which the below code needs to check for anyway. subscriber_map = defaultdict(lambda: None) sub_unsub_stream_ids = set() for sub in sub_dicts: sub_unsub_stream_ids.add(sub["stream_id"]) stream = stream_hash.get(sub["stream_id"]) if not stream: # This stream has been deactivated, don't include it. continue # We first construct a dictionary based on the standard Stream # and Subscription models' API_FIELDS. stream_dict = {} for field_name in Stream.API_FIELDS: if field_name == "id": stream_dict['stream_id'] = stream["id"] continue stream_dict[field_name] = stream[field_name] # Copy Subscription.API_FIELDS except for "active", which is # used to determine where to the put the field. for field_name in Subscription.API_FIELDS: stream_dict[field_name] = sub[field_name] # Backwards-compatibility for clients that haven't been # updated for the in_home_view => is_muted API migration. stream_dict['in_home_view'] = not stream_dict['is_muted'] # Backwards-compatibility for clients that haven't been # updated for the is_announcement_only -> stream_post_policy # migration. stream_dict['is_announcement_only'] = \ stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS # Add a few computed fields not directly from the data models. 
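        # Worked example of the traffic figure computed below (numbers made
        # up): a stream created 10 days ago with 30 messages in the last 28
        # days reports 30 * 7 // 10 = 21 messages/week; one created 56 days
        # ago with 30 recent messages reports 30 // 4 = 7. Streams younger
        # than STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS report None, and the
        # result is rounded to 2 significant digits.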
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream["id"], stream["date_created"], recent_traffic) stream_dict['email_address'] = encode_email_address_helper( stream["name"], stream["email_token"], show_sender=True) # Construct and add subscribers data subscribers: Optional[List[int]] = subscriber_map[stream["id"]] # Important: don't show the subscribers if the stream is invite only # and this user isn't on it anymore (or a realm administrator). if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin): subscribers = None # Guest users lose access to subscribers when they are unsubscribed. if not sub["active"] and user_profile.is_guest: subscribers = None if subscribers is not None: stream_dict['subscribers'] = subscribers # is_active is represented in this structure by which list we include it in. is_active = stream_dict.pop("active") if is_active: subscribed.append(stream_dict) else: unsubscribed.append(stream_dict) all_streams_id_set = set(all_streams_id) if user_profile.can_access_public_streams(): never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids else: never_subscribed_stream_ids = set() never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams if ns_stream_dict['id'] in never_subscribed_stream_ids] for stream in never_subscribed_streams: is_public = (not stream['invite_only']) if is_public or user_profile.is_realm_admin: stream_dict = {} for field_name in Stream.API_FIELDS: if field_name == "id": stream_dict['stream_id'] = stream["id"] continue stream_dict[field_name] = stream[field_name] stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream["id"], stream["date_created"], recent_traffic) # Backwards-compatibility addition of removed field. stream_dict['is_announcement_only'] = \ stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS if is_public or user_profile.is_realm_admin: subscribers = subscriber_map[stream["id"]] if subscribers is not None: stream_dict['subscribers'] = subscribers never_subscribed.append(stream_dict) return (sorted(subscribed, key=lambda x: x['name']), sorted(unsubscribed, key=lambda x: x['name']), sorted(never_subscribed, key=lambda x: x['name'])) def gather_subscriptions( user_profile: UserProfile, include_subscribers: bool=False, ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]: subscribed, unsubscribed, _ = gather_subscriptions_helper( user_profile, include_subscribers=include_subscribers) if include_subscribers: user_ids = set() for subs in [subscribed, unsubscribed]: for sub in subs: if 'subscribers' in sub: for subscriber in sub['subscribers']: user_ids.add(subscriber) email_dict = get_emails_from_user_ids(list(user_ids)) for subs in [subscribed, unsubscribed]: for sub in subs: if 'subscribers' in sub: sub['subscribers'] = sorted([ email_dict[user_id] for user_id in sub['subscribers'] ]) return (subscribed, unsubscribed) def get_active_presence_idle_user_ids(realm: Realm, sender_id: int, message_type: str, active_user_ids: Set[int], user_flags: Dict[int, List[str]]) -> List[int]: ''' Given a list of active_user_ids, we build up a subset of those users who fit these criteria: * They are likely to need notifications (either due to mentions, alert words, or being PM'ed). * They are no longer "present" according to the UserPresence table. 
    '''
    if realm.presence_disabled:
        return []

    is_pm = message_type == 'private'

    user_ids = set()
    for user_id in active_user_ids:
        flags: Iterable[str] = user_flags.get(user_id, [])
        mentioned = 'mentioned' in flags or 'wildcard_mentioned' in flags
        private_message = is_pm and user_id != sender_id
        alerted = 'has_alert_word' in flags
        if mentioned or private_message or alerted:
            user_ids.add(user_id)

    return filter_presence_idle_user_ids(user_ids)

def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
    # Given a set of user IDs (the recipients of a message), accesses
    # the UserPresence table to determine which of these users are
    # currently idle and should potentially get email notifications
    # (and push notifications with
    # user_profile.enable_online_push_notifications=False).
    #
    # We exclude any presence data from ZulipMobile for the purpose of
    # triggering these notifications; the mobile app can more
    # effectively do its own client-side filtering of notification
    # sounds/etc. for the case that the user is actively doing a PM
    # conversation in the app.
    if not user_ids:
        return []

    # Matches presence.js constant
    OFFLINE_THRESHOLD_SECS = 140

    recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
    rows = UserPresence.objects.filter(
        user_profile_id__in=user_ids,
        status=UserPresence.ACTIVE,
        timestamp__gte=recent,
    ).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id')
    active_user_ids = {row['user_profile_id'] for row in rows}
    idle_user_ids = user_ids - active_user_ids
    return sorted(list(idle_user_ids))

def do_send_confirmation_email(invitee: PreregistrationUser,
                               referrer: UserProfile) -> str:
    """
    Send the confirmation/welcome e-mail to an invited user.
    """
    activation_url = create_confirmation_link(invitee, Confirmation.INVITATION)
    context = {'referrer_full_name': referrer.full_name,
               'referrer_email': referrer.delivery_email,
               'activate_url': activation_url,
               'referrer_realm_name': referrer.realm.name}
    from_name = f"{referrer.full_name} (via Zulip)"
    send_email('zerver/emails/invitation',
               to_emails=[invitee.email],
               from_name=from_name,
               from_address=FromAddress.tokenized_no_reply_address(),
               language=referrer.realm.default_language,
               context=context,
               realm=referrer.realm)
    return activation_url

def email_not_system_bot(email: str) -> None:
    if is_cross_realm_bot_email(email):
        msg = email_reserved_for_system_bots_error(email)
        code = msg
        raise ValidationError(
            msg,
            code=code,
            params=dict(deactivated=False),
        )

class InvitationError(JsonableError):
    code = ErrorCode.INVITATION_FAILED
    data_fields = ['errors', 'sent_invitations']

    def __init__(self, msg: str, errors: List[Tuple[str, str, bool]],
                 sent_invitations: bool) -> None:
        self._msg: str = msg
        self.errors: List[Tuple[str, str, bool]] = errors
        self.sent_invitations: bool = sent_invitations

def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
    '''An upper bound on the number of invites sent in the last `days` days'''
    recent_invites = RealmCount.objects.filter(
        realm__in=realms,
        property='invites_sent::day',
        end_time__gte=timezone_now() - datetime.timedelta(days=days),
    ).aggregate(Sum('value'))['value__sum']
    if recent_invites is None:
        return 0
    return recent_invites

def check_invite_limit(realm: Realm, num_invitees: int) -> None:
    '''Discourage using invitation emails as a vector for carrying spam.'''
    msg = _("You do not have enough remaining invites. "
            "Please contact {email} to have your limit raised.
" "No invitations were sent.").format(email=settings.ZULIP_ADMINISTRATOR) if not settings.OPEN_REALM_CREATION: return recent_invites = estimate_recent_invites([realm], days=1) if num_invitees + recent_invites > realm.max_invites: raise InvitationError(msg, [], sent_invitations=False) default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS) if realm.date_created <= timezone_now() - newrealm_age: # If this isn't a "newly-created" realm, we're done. The # remaining code applies an aggregate limit across all # "new" realms, to address sudden bursts of spam realms. return if realm.max_invites > default_max: # If a user is on a realm where we've bumped up # max_invites, then we exempt them from invite limits. return new_realms = Realm.objects.filter( date_created__gte=timezone_now() - newrealm_age, _max_invites__lte=default_max, ).all() for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS: recent_invites = estimate_recent_invites(new_realms, days=days) if num_invitees + recent_invites > count: raise InvitationError(msg, [], sent_invitations=False) def do_invite_users(user_profile: UserProfile, invitee_emails: SizedTextIterable, streams: Iterable[Stream], invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> None: check_invite_limit(user_profile.realm, len(invitee_emails)) realm = user_profile.realm if not realm.invite_required: # Inhibit joining an open realm to send spam invitations. min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS) if (user_profile.date_joined > timezone_now() - min_age and not user_profile.is_realm_admin): raise InvitationError( _("Your account is too new to send invites for this organization. " "Ask an organization admin, or a more experienced user."), [], sent_invitations=False) good_emails: Set[str] = set() errors: List[Tuple[str, str, bool]] = [] validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm) for email in invitee_emails: if email == '': continue email_error = validate_email_is_valid( email, validate_email_allowed_in_realm, ) if email_error: errors.append((email, email_error, False)) else: good_emails.add(email) ''' good_emails are emails that look ok so far, but we still need to make sure they're not gonna conflict with existing users ''' error_dict = get_existing_user_errors(user_profile.realm, good_emails) skipped: List[Tuple[str, str, bool]] = [] for email in error_dict: msg, deactivated = error_dict[email] skipped.append((email, msg, deactivated)) good_emails.remove(email) validated_emails = list(good_emails) if errors: raise InvitationError( _("Some emails did not validate, so we didn't send any invitations."), errors + skipped, sent_invitations=False) if skipped and len(skipped) == len(invitee_emails): # All e-mails were skipped, so we didn't actually invite anyone. raise InvitationError(_("We weren't able to invite anyone."), skipped, sent_invitations=False) # We do this here rather than in the invite queue processor since this # is used for rate limiting invitations, rather than keeping track of # when exactly invitations were sent do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'], None, timezone_now(), increment=len(validated_emails)) # Now that we are past all the possible errors, we actually create # the PreregistrationUser objects and trigger the email invitations. for email in validated_emails: # The logged in user is the referrer. 
prereg_user = PreregistrationUser(email=email, referred_by=user_profile, invited_as=invite_as, realm=user_profile.realm) prereg_user.save() stream_ids = [stream.id for stream in streams] prereg_user.streams.set(stream_ids) event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id} queue_json_publish("invites", event) if skipped: raise InvitationError(_("Some of those addresses are already using Zulip, " "so we didn't send them an invitation. We did send " "invitations to everyone else!"), skipped, sent_invitations=True) notify_invites_changed(user_profile) def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]: if user_profile.is_realm_admin: prereg_users = filter_to_valid_prereg_users( PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm) ) else: prereg_users = filter_to_valid_prereg_users( PreregistrationUser.objects.filter(referred_by=user_profile) ) invites = [] for invitee in prereg_users: invites.append(dict(email=invitee.email, invited_by_user_id=invitee.referred_by.id, invited=datetime_to_timestamp(invitee.invited_at), id=invitee.id, invited_as=invitee.invited_as, is_multiuse=False)) if not user_profile.is_realm_admin: # We do not return multiuse invites to non-admin users. return invites lowest_datetime = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS) multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE, date_sent__gte=lowest_datetime) for confirmation_obj in multiuse_confirmation_objs: invite = confirmation_obj.content_object invites.append(dict(invited_by_user_id=invite.referred_by.id, invited=datetime_to_timestamp(confirmation_obj.date_sent), id=invite.id, link_url=confirmation_url(confirmation_obj.confirmation_key, user_profile.realm, Confirmation.MULTIUSE_INVITE), invited_as=invite.invited_as, is_multiuse=True)) return invites def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int, streams: Sequence[Stream] = []) -> str: realm = referred_by.realm invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by) if streams: invite.streams.set(streams) invite.invited_as = invited_as invite.save() notify_invites_changed(referred_by) return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE) def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None: email = prereg_user.email # Delete both the confirmation objects and the prereg_user object. # TODO: Probably we actually want to set the confirmation objects # to a "revoked" status so that we can give the invited user a better # error message. content_type = ContentType.objects.get_for_model(PreregistrationUser) Confirmation.objects.filter(content_type=content_type, object_id=prereg_user.id).delete() prereg_user.delete() clear_scheduled_invitation_emails(email) notify_invites_changed(prereg_user) def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None: content_type = ContentType.objects.get_for_model(MultiuseInvite) Confirmation.objects.filter(content_type=content_type, object_id=multiuse_invite.id).delete() multiuse_invite.delete() notify_invites_changed(multiuse_invite.referred_by) def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int: # These are two structurally for the caller's code path. 
assert prereg_user.referred_by is not None assert prereg_user.realm is not None check_invite_limit(prereg_user.referred_by.realm, 1) prereg_user.invited_at = timezone_now() prereg_user.save() do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'], None, prereg_user.invited_at) clear_scheduled_invitation_emails(prereg_user.email) # We don't store the custom email body, so just set it to None event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None} queue_json_publish("invites", event) return datetime_to_timestamp(prereg_user.invited_at) def notify_realm_emoji(realm: Realm) -> None: event = dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji()) send_event(realm, event, active_user_ids(realm.id)) def check_add_realm_emoji(realm: Realm, name: str, author: UserProfile, image_file: File) -> Optional[RealmEmoji]: realm_emoji = RealmEmoji(realm=realm, name=name, author=author) realm_emoji.full_clean() realm_emoji.save() emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id) # The only user-controlled portion of 'emoji_file_name' is an extension, # which can not contain '..' or '/' or '\', making it difficult to exploit emoji_file_name = mark_sanitized(emoji_file_name) emoji_uploaded_successfully = False try: upload_emoji_image(image_file, emoji_file_name, author) emoji_uploaded_successfully = True finally: if not emoji_uploaded_successfully: realm_emoji.delete() return None else: realm_emoji.file_name = emoji_file_name realm_emoji.save(update_fields=['file_name']) notify_realm_emoji(realm_emoji.realm) return realm_emoji def do_remove_realm_emoji(realm: Realm, name: str) -> None: emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False) emoji.deactivated = True emoji.save(update_fields=['deactivated']) notify_realm_emoji(realm) def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None: event = dict(type="alert_words", alert_words=words) send_event(user_profile.realm, event, [user_profile.id]) def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None: words = add_user_alert_words(user_profile, alert_words) notify_alert_words(user_profile, words) def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None: words = remove_user_alert_words(user_profile, alert_words) notify_alert_words(user_profile, words) def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str, date_muted: Optional[datetime.datetime]=None) -> None: if date_muted is None: date_muted = timezone_now() add_topic_mute(user_profile, stream.id, recipient.id, topic, date_muted) event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None: remove_topic_mute(user_profile, stream.id, topic) event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None: UserHotspot.objects.get_or_create(user=user, hotspot=hotspot) event = dict(type="hotspots", hotspots=get_next_hotspots(user)) send_event(user.realm, event, [user.id]) def notify_realm_filters(realm: Realm) -> None: realm_filters = realm_filters_for_realm(realm.id) event = dict(type="realm_filters", realm_filters=realm_filters) send_event(realm, event, active_user_ids(realm.id)) # 
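# Illustrative linkifier example for the API below (pattern and URL are
# hypothetical): calling
#
#     do_add_realm_filter(realm, r'#(?P<id>[0-9]+)',
#                         'https://github.com/zulip/zulip/issues/%(id)s')
#
# makes "#1234" in messages render as a link to issue 1234.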
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
#   * Named groups will be converted to numbered groups automatically
#   * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
    pattern = pattern.strip()
    url_format_string = url_format_string.strip()
    realm_filter = RealmFilter(realm=realm,
                               pattern=pattern,
                               url_format_string=url_format_string)
    realm_filter.full_clean()
    realm_filter.save()
    notify_realm_filters(realm)
    return realm_filter.id

def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
                           id: Optional[int]=None) -> None:
    if pattern is not None:
        RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
    else:
        RealmFilter.objects.get(realm=realm, pk=id).delete()
    notify_realm_filters(realm)

def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
    # We may eventually use memcached to speed this up, but the DB is fast.
    return UserProfile.emails_from_ids(user_ids)

def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
    realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
                                              allow_subdomains=allow_subdomains)
    event = dict(type="realm_domains", op="add",
                 realm_domain=dict(domain=realm_domain.domain,
                                   allow_subdomains=realm_domain.allow_subdomains))
    send_event(realm, event, active_user_ids(realm.id))
    return realm_domain

def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
    realm_domain.allow_subdomains = allow_subdomains
    realm_domain.save(update_fields=['allow_subdomains'])
    event = dict(type="realm_domains", op="change",
                 realm_domain=dict(domain=realm_domain.domain,
                                   allow_subdomains=realm_domain.allow_subdomains))
    send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))

def do_remove_realm_domain(realm_domain: RealmDomain,
                           acting_user: Optional[UserProfile]=None) -> None:
    realm = realm_domain.realm
    domain = realm_domain.domain
    realm_domain.delete()
    if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
        # If this was the last realm domain, we mark the realm as no
        # longer restricted to domains, because the feature doesn't do
        # anything if there are no domains, and this is probably less
        # confusing than the alternative.
do_set_realm_property(realm, 'emails_restricted_to_domains', False, acting_user=acting_user) event = dict(type="realm_domains", op="remove", domain=domain) send_event(realm, event, active_user_ids(realm.id)) def get_occupied_streams(realm: Realm) -> QuerySet: # TODO: Make a generic stub for QuerySet """ Get streams with subscribers """ exists_expression = Exists( Subscription.objects.filter(active=True, user_profile__is_active=True, user_profile__realm=realm, recipient_id=OuterRef('recipient_id')), ) occupied_streams = Stream.objects.filter(realm=realm, deactivated=False) \ .annotate(occupied=exists_expression).filter(occupied=True) return occupied_streams def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]: query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True) streams = Stream.get_client_data(query) return streams def do_get_streams( user_profile: UserProfile, include_public: bool=True, include_subscribed: bool=True, include_all_active: bool=False, include_default: bool=False, include_owner_subscribed: bool=False, ) -> List[Dict[str, Any]]: if include_all_active and not user_profile.is_api_super_user: raise JsonableError(_("User not authorized for this query")) include_public = include_public and user_profile.can_access_public_streams() # Start out with all streams in the realm with subscribers query = get_occupied_streams(user_profile.realm) if include_all_active: streams = Stream.get_client_data(query) else: # We construct a query as the or (|) of the various sources # this user requested streams from. query_filter: Optional[Q] = None def add_filter_option(option: Q) -> None: nonlocal query_filter if query_filter is None: query_filter = option else: query_filter |= option if include_subscribed: subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile) recipient_check = Q(id__in=set(subscribed_stream_ids)) add_filter_option(recipient_check) if include_public: invite_only_check = Q(invite_only=False) add_filter_option(invite_only_check) if include_owner_subscribed and user_profile.is_bot: bot_owner = user_profile.bot_owner assert bot_owner is not None owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner) owner_subscribed_check = Q(id__in=set(owner_stream_ids)) add_filter_option(owner_subscribed_check) if query_filter is not None: query = query.filter(query_filter) streams = Stream.get_client_data(query) else: # Don't bother going to the database with no valid sources streams = [] streams.sort(key=lambda elt: elt["name"]) if include_default: is_default = {} default_streams = get_default_streams_for_realm(user_profile.realm_id) for default_stream in default_streams: is_default[default_stream.id] = True for stream in streams: stream['is_default'] = is_default.get(stream["stream_id"], False) return streams def notify_attachment_update(user_profile: UserProfile, op: str, attachment_dict: Dict[str, Any]) -> None: event = { 'type': 'attachment', 'op': op, 'attachment': attachment_dict, "upload_space_used": user_profile.realm.currently_used_upload_space_bytes(), } send_event(user_profile.realm, event, [user_profile.id]) def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool: claimed = False for path_id in potential_path_ids: user_profile = message.sender is_message_realm_public = False if message.is_stream_message(): is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public() if not validate_attachment_request(user_profile, path_id): # Technically, there are 2 cases here: # * 
The user put something in their message that has the form # of an upload, but doesn't correspond to a file that doesn't # exist. validate_attachment_request will return None. # * The user is trying to send a link to a file they don't have permission to # access themselves. validate_attachment_request will return False. # # Either case is unusual and suggests a UI bug that got # the user in this situation, so we log in these cases. logging.warning( "User %s tried to share upload %s in message %s, but lacks permission", user_profile.id, path_id, message.id, ) continue claimed = True attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public) notify_attachment_update(user_profile, "update", attachment.to_dict()) return claimed def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None: old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago) for attachment in old_unclaimed_attachments: delete_message_image(attachment.path_id) attachment.delete() def check_attachment_reference_change(message: Message) -> bool: # For a unsaved message edit (message.* has been updated, but not # saved to the database), adjusts Attachment data to correspond to # the new content. prev_attachments = {a.path_id for a in message.attachment_set.all()} new_attachments = set(message.potential_attachment_path_ids) if new_attachments == prev_attachments: return bool(prev_attachments) to_remove = list(prev_attachments - new_attachments) if len(to_remove) > 0: attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update() message.attachment_set.remove(*attachments_to_update) to_add = list(new_attachments - prev_attachments) if len(to_add) > 0: do_claim_attachments(message, to_add) return message.attachment_set.exists() def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None: fields = custom_profile_fields_for_realm(realm.id) event = dict(type="custom_profile_fields", op=operation, fields=[f.as_dict() for f in fields]) send_event(realm, event, active_user_ids(realm.id)) def try_add_realm_default_custom_profile_field(realm: Realm, field_subtype: str) -> CustomProfileField: field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype] field = CustomProfileField(realm=realm, name=field_data['name'], field_type=CustomProfileField.EXTERNAL_ACCOUNT, hint=field_data['hint'], field_data=ujson.dumps(dict(subtype=field_subtype))) field.save() field.order = field.id field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'add') return field def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int, hint: str='', field_data: Optional[ProfileFieldData]=None) -> CustomProfileField: field = CustomProfileField(realm=realm, name=name, field_type=field_type) field.hint = hint if (field.field_type == CustomProfileField.CHOICE or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT): field.field_data = ujson.dumps(field_data or {}) field.save() field.order = field.id field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'add') return field def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None: """ Deleting a field will also delete the user profile data associated with it in CustomProfileFieldValue model. 
""" field.delete() notify_realm_custom_profile_fields(realm, 'delete') def do_remove_realm_custom_profile_fields(realm: Realm) -> None: CustomProfileField.objects.filter(realm=realm).delete() def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField, name: str, hint: str='', field_data: Optional[ProfileFieldData]=None) -> None: field.name = name field.hint = hint if (field.field_type == CustomProfileField.CHOICE or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT): field.field_data = ujson.dumps(field_data or {}) field.save() notify_realm_custom_profile_fields(realm, 'update') def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None: order_mapping = {_[1]: _[0] for _ in enumerate(order)} fields = CustomProfileField.objects.filter(realm=realm) for field in fields: if field.id not in order_mapping: raise JsonableError(_("Invalid order mapping.")) for field in fields: field.order = order_mapping[field.id] field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'update') def notify_user_update_custom_profile_data(user_profile: UserProfile, field: Dict[str, Union[int, str, List[int], None]]) -> None: data = dict(id=field['id']) if field['type'] == CustomProfileField.USER: data["value"] = ujson.dumps(field['value']) else: data['value'] = field['value'] if field['rendered_value']: data['rendered_value'] = field['rendered_value'] payload = dict(user_id=user_profile.id, custom_profile_field=data) event = dict(type="realm_user", op="update", person=payload) send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id)) def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile, data: List[Dict[str, Union[int, str, List[int]]]], ) -> None: with transaction.atomic(): for field in data: field_value, created = CustomProfileFieldValue.objects.get_or_create( user_profile=user_profile, field_id=field['id']) if not created and field_value.value == str(field['value']): # If the field value isn't actually being changed to a different one, # and always_notify is disabled, we have nothing to do here for this field. # Note: field_value.value is a TextField() so we need to cast field['value'] # to a string for the comparison in this if. 
continue field_value.value = field['value'] if field_value.field.is_renderable(): field_value.rendered_value = render_stream_description(str(field['value'])) field_value.save(update_fields=['value', 'rendered_value']) else: field_value.save(update_fields=['value']) notify_user_update_custom_profile_data(user_profile, { "id": field_value.field_id, "value": field_value.value, "rendered_value": field_value.rendered_value, "type": field_value.field.field_type}) def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None: try: field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id) field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile) field_value.delete() notify_user_update_custom_profile_data(user_profile, {'id': field_id, 'value': None, 'rendered_value': None, 'type': field.field_type}) except CustomProfileField.DoesNotExist: raise JsonableError(_('Field id {id} not found.').format(id=field_id)) except CustomProfileFieldValue.DoesNotExist: pass def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None: event = dict(type="user_group", op="add", group=dict(name=user_group.name, members=[member.id for member in members], description=user_group.description, id=user_group.id, ), ) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile], description: str) -> None: try: user_group = create_user_group(name, initial_members, realm, description=description) do_send_create_user_group_event(user_group, initial_members) except django.db.utils.IntegrityError: raise JsonableError(_("User group '{}' already exists.").format(name)) def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None: event = dict(type="user_group", op='update', group_id=user_group.id, data=data) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def do_update_user_group_name(user_group: UserGroup, name: str) -> None: try: user_group.name = name user_group.save(update_fields=['name']) except django.db.utils.IntegrityError: raise JsonableError(_("User group '{}' already exists.").format(name)) do_send_user_group_update_event(user_group, dict(name=name)) def do_update_user_group_description(user_group: UserGroup, description: str) -> None: user_group.description = description user_group.save(update_fields=['description']) do_send_user_group_update_event(user_group, dict(description=description)) def do_update_outgoing_webhook_service(bot_profile: UserProfile, service_interface: int, service_payload_url: str) -> None: # TODO: First service is chosen because currently one bot can only have one service. # Update this once multiple services are supported. 
service = get_bot_services(bot_profile.id)[0] service.base_url = service_payload_url service.interface = service_interface service.save() send_event(bot_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=bot_profile.id, services = [dict(base_url=service.base_url, interface=service.interface, token=service.token)], ), ), bot_owner_user_ids(bot_profile)) def do_update_bot_config_data(bot_profile: UserProfile, config_data: Dict[str, str]) -> None: for key, value in config_data.items(): set_bot_config(bot_profile, key, value) updated_config_data = get_bot_config(bot_profile) send_event(bot_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=bot_profile.id, services = [dict(config_data=updated_config_data)], ), ), bot_owner_user_ids(bot_profile)) def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]: user_profile = get_user_profile_by_id(user_profile_id) services = get_bot_services(user_profile_id) service_dicts: List[Dict[str, Any]] = [] if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: service_dicts = [{'base_url': service.base_url, 'interface': service.interface, 'token': service.token, } for service in services] elif user_profile.bot_type == UserProfile.EMBEDDED_BOT: try: service_dicts = [{'config_data': get_bot_config(user_profile), 'service_name': services[0].name, }] # A ConfigError just means that there are no config entries for user_profile. except ConfigError: pass return service_dicts def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]], realm: Realm) -> Dict[int, List[Dict[str, Any]]]: bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts] bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list) for service in Service.objects.filter(user_profile_id__in=bot_profile_ids): bot_services_by_uid[service.user_profile_id].append(service) embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT] embedded_bot_configs = get_bot_configs(embedded_bot_ids) service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {} for bot_dict in bot_dicts: bot_profile_id = bot_dict["id"] bot_type = bot_dict["bot_type"] services = bot_services_by_uid[bot_profile_id] service_dicts: List[Dict[str, Any]] = [] if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: service_dicts = [{'base_url': service.base_url, 'interface': service.interface, 'token': service.token, } for service in services] elif bot_type == UserProfile.EMBEDDED_BOT: if bot_profile_id in embedded_bot_configs.keys(): bot_config = embedded_bot_configs[bot_profile_id] service_dicts = [{'config_data': bot_config, 'service_name': services[0].name, }] service_dicts_by_uid[bot_profile_id] = service_dicts return service_dicts_by_uid def get_owned_bot_dicts(user_profile: UserProfile, include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]: if user_profile.is_realm_admin and include_all_realm_bots_if_admin: result = get_bot_dicts_in_realm(user_profile.realm) else: result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True, bot_owner=user_profile).values(*bot_dict_fields) services_by_ids = get_service_dicts_for_bots(result, user_profile.realm) return [{'email': botdict['email'], 'user_id': botdict['id'], 'full_name': botdict['full_name'], 'bot_type': botdict['bot_type'], 'is_active': botdict['is_active'], 'api_key': botdict['api_key'], 'default_sending_stream': botdict['default_sending_stream__name'], 'default_events_register_stream': botdict['default_events_register_stream__name'], 
'default_all_public_streams': botdict['default_all_public_streams'], 'owner_id': botdict['bot_owner__id'], 'avatar_url': avatar_url_from_dict(botdict), 'services': services_by_ids[botdict['id']], } for botdict in result] def do_send_user_group_members_update_event(event_name: str, user_group: UserGroup, user_ids: List[int]) -> None: event = dict(type="user_group", op=event_name, group_id=user_group.id, user_ids=user_ids) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def bulk_add_members_to_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None: memberships = [UserGroupMembership(user_group_id=user_group.id, user_profile=user_profile) for user_profile in user_profiles] UserGroupMembership.objects.bulk_create(memberships) user_ids = [up.id for up in user_profiles] do_send_user_group_members_update_event('add_members', user_group, user_ids) def remove_members_from_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None: UserGroupMembership.objects.filter( user_group_id=user_group.id, user_profile__in=user_profiles).delete() user_ids = [up.id for up in user_profiles] do_send_user_group_members_update_event('remove_members', user_group, user_ids) def do_send_delete_user_group_event(realm: Realm, user_group_id: int, realm_id: int) -> None: event = dict(type="user_group", op="remove", group_id=user_group_id) send_event(realm, event, active_user_ids(realm_id)) def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None: user_group = access_user_group_by_id(user_group_id, user_profile) user_group.delete() do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id) def do_send_realm_reactivation_email(realm: Realm) -> None: url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION) context = {'confirmation_url': url, 'realm_uri': realm.uri, 'realm_name': realm.name} language = realm.default_language send_email_to_admins( 'zerver/emails/realm_reactivation', realm, from_address=FromAddress.tokenized_no_reply_address(), from_name=FromAddress.security_email_from_name(language=language), language=language, context=context) def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None: user.zoom_token = token user.save(update_fields=["zoom_token"]) send_event( user.realm, dict(type="has_zoom_token", value=token is not None), [user.id], ) def notify_realm_export(user_profile: UserProfile) -> None: # In the future, we may want to send this event to all realm admins. event = dict(type='realm_export', exports=get_realm_exports_serialized(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None: # Give mypy a hint so it knows `ujson.loads` # isn't being passed an `Optional[str]`. export_extra_data = export.extra_data assert export_extra_data is not None export_data = ujson.loads(export_extra_data) export_path = export_data.get('export_path') if export_path: # Allow removal even if the export failed. 
delete_export_tarball(export_path) export_data.update({'deleted_timestamp': timezone_now().timestamp()}) export.extra_data = ujson.dumps(export_data) export.save(update_fields=['extra_data']) notify_realm_export(user_profile) def get_topic_messages(user_profile: UserProfile, stream: Stream, topic_name: str) -> List[Message]: query = UserMessage.objects.filter( user_profile=user_profile, message__recipient=stream.recipient, ).order_by("id") return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
_internal_prep_message
Creates a message object and checks it, but doesn't send it or save it to the database. The internal function that calls this can therefore batch-send a bunch of created messages together as one database query. Call do_send_messages with a list of the return values of this method.
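A minimal usage sketch of the batching pattern this docstring describes, assuming _internal_prep_message takes a realm, a sender, an Addressee, and the message content and returns a prepared-message dict (or None if the checks fail); the argument names and the Addressee constructor shown here are illustrative assumptions, not verified signatures:

# Illustrative sketch only: build several messages without writing to the
# database, then send the whole batch with a single do_send_messages call.
prepped = []
for content in ["first announcement", "second announcement"]:
    # Assumed arguments; the real helper may take them in a different form.
    message = _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_stream_name("announce", "updates"),
        content=content,
    )
    if message is not None:  # skip any message that failed its checks
        prepped.append(message)

do_send_messages(prepped)  # one batched database write instead of one per message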
import datetime import itertools import logging import os import platform import time from collections import defaultdict from operator import itemgetter from typing import ( AbstractSet, Any, Callable, Dict, Iterable, List, Mapping, MutableMapping, Optional, Sequence, Set, Tuple, Union, ) import django.db.utils import ujson from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ValidationError from django.core.files import File from django.db import IntegrityError, connection, transaction from django.db.models import Count, Exists, F, Max, OuterRef, Q, Sum from django.db.models.query import QuerySet from django.utils.html import escape from django.utils.timezone import now as timezone_now from django.utils.translation import override as override_language from django.utils.translation import ugettext as _ from psycopg2.extras import execute_values from psycopg2.sql import SQL from typing_extensions import TypedDict from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat from analytics.models import StreamCount from confirmation import settings as confirmation_settings from confirmation.models import ( Confirmation, confirmation_url, create_confirmation_link, generate_key, ) from zerver.decorator import statsd_increment from zerver.lib import retention as retention from zerver.lib.addressee import Addressee from zerver.lib.alert_words import ( add_user_alert_words, get_alert_word_automaton, remove_user_alert_words, ) from zerver.lib.avatar import avatar_url, avatar_url_from_dict from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config from zerver.lib.bulk_create import bulk_create_users from zerver.lib.cache import ( bot_dict_fields, cache_delete, cache_delete_many, cache_set, cache_set_many, cache_with_key, delete_user_profile_caches, display_recipient_cache_key, flush_user_profile, to_dict_cache_key_id, user_profile_by_api_key_cache_key, user_profile_by_email_cache_key, ) from zerver.lib.context_managers import lockfile from zerver.lib.create_user import create_user, get_display_email_address from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper from zerver.lib.email_notifications import enqueue_welcome_emails from zerver.lib.email_validation import ( email_reserved_for_system_bots_error, get_existing_user_errors, get_realm_email_validator, validate_email_is_valid, ) from zerver.lib.emoji import get_emoji_file_name from zerver.lib.exceptions import ( ErrorCode, JsonableError, MarkdownRenderingException, StreamDoesNotExistError, StreamWithIDDoesNotExistError, ) from zerver.lib.export import get_realm_exports_serialized from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS from zerver.lib.hotspots import get_next_hotspots from zerver.lib.i18n import get_language_name from zerver.lib.markdown import MentionData, topic_links from zerver.lib.markdown import version as markdown_version from zerver.lib.message import ( MessageDict, access_message, render_markdown, truncate_body, truncate_topic, update_first_visible_message_id, ) from zerver.lib.pysa import mark_sanitized from zerver.lib.queue import queue_json_publish from zerver.lib.realm_icon import realm_icon_url from zerver.lib.realm_logo import get_realm_logo_data from zerver.lib.retention import move_messages_to_archive from zerver.lib.send_email import ( FromAddress, clear_scheduled_emails, clear_scheduled_invitation_emails, send_email, 
send_email_to_admins, ) from zerver.lib.server_initialization import create_internal_realm, server_initialized from zerver.lib.sessions import delete_user_sessions from zerver.lib.storage import static_path from zerver.lib.stream_recipient import StreamRecipientMap from zerver.lib.stream_subscription import ( get_active_subscriptions_for_stream_id, get_active_subscriptions_for_stream_ids, get_bulk_stream_subscriber_info, get_stream_subscriptions_for_user, get_stream_subscriptions_for_users, get_subscribed_stream_ids_for_user, num_subscribers_for_stream_id, ) from zerver.lib.stream_topic import StreamTopicTarget from zerver.lib.streams import ( access_stream_for_send_message, check_stream_name, create_stream_if_needed, get_default_value_for_history_public_to_subscribers, render_stream_description, send_stream_creation_event, subscribed_to_stream, ) from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime from zerver.lib.topic import ( LEGACY_PREV_TOPIC, ORIG_TOPIC, TOPIC_LINKS, TOPIC_NAME, filter_by_exact_message_topic, filter_by_topic_name_via_message, save_message_for_edit_use_case, update_messages_for_topic_edit, ) from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute from zerver.lib.types import ProfileFieldData from zerver.lib.upload import ( claim_attachment, delete_avatar_image, delete_export_tarball, delete_message_image, upload_emoji_image, ) from zerver.lib.user_groups import access_user_group_by_id, create_user_group from zerver.lib.user_status import update_user_status from zerver.lib.users import ( check_bot_name_available, check_full_name, format_user_row, get_api_key, user_profile_to_user_row, ) from zerver.lib.utils import generate_api_key, log_statsd_event from zerver.lib.validator import check_widget_content from zerver.lib.widget import do_widget_post_save_actions from zerver.models import ( MAX_MESSAGE_LENGTH, Attachment, Client, CustomProfileField, CustomProfileFieldValue, DefaultStream, DefaultStreamGroup, EmailChangeStatus, Message, MultiuseInvite, PreregistrationUser, Reaction, Realm, RealmAuditLog, RealmDomain, RealmEmoji, RealmFilter, Recipient, ScheduledEmail, ScheduledMessage, Service, Stream, SubMessage, Subscription, UserActivity, UserActivityInterval, UserGroup, UserGroupMembership, UserHotspot, UserMessage, UserPresence, UserProfile, UserStatus, active_non_guest_user_ids, active_user_ids, custom_profile_fields_for_realm, filter_to_valid_prereg_users, get_active_streams, get_bot_dicts_in_realm, get_bot_services, get_client, get_default_stream_groups, get_huddle_recipient, get_huddle_user_ids, get_old_unclaimed_attachments, get_stream, get_stream_by_id_in_realm, get_stream_cache_key, get_system_bot, get_user_by_delivery_email, get_user_by_id_in_realm_including_cross_realm, get_user_profile_by_id, is_cross_realm_bot_email, query_for_ids, realm_filters_for_realm, stream_name_in_use, validate_attachment_request, ) from zerver.tornado.event_queue import send_event if settings.BILLING_ENABLED: from corporate.lib.stripe import downgrade_now, update_license_ledger_if_needed # This will be used to type annotate parameters in a function if the function # works on both str and unicode in python 2 but in python 3 it only works on str. 
SizedTextIterable = Union[Sequence[str], AbstractSet[str]] ONBOARDING_TOTAL_MESSAGES = 1000 ONBOARDING_UNREAD_MESSAGES = 20 STREAM_ASSIGNMENT_COLORS = [ "#76ce90", "#fae589", "#a6c7e5", "#e79ab5", "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5", "#f5ce6e", "#c2726a", "#94c849", "#bd86e5", "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063", "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4", "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"] def subscriber_info(user_id: int) -> Dict[str, Any]: return { 'id': user_id, 'flags': ['read'] } # Store an event in the log for re-importing messages def log_event(event: MutableMapping[str, Any]) -> None: if settings.EVENT_LOG_DIR is None: return if "timestamp" not in event: event["timestamp"] = time.time() if not os.path.exists(settings.EVENT_LOG_DIR): os.mkdir(settings.EVENT_LOG_DIR) template = os.path.join(settings.EVENT_LOG_DIR, '%s.' + platform.node() + timezone_now().strftime('.%Y-%m-%d')) with lockfile(template % ('lock',)): with open(template % ('events',), 'a') as log: log.write(ujson.dumps(event) + '\n') def can_access_stream_user_ids(stream: Stream) -> Set[int]: # return user ids of users who can access the attributes of # a stream, such as its name/description. if stream.is_public(): # For a public stream, this is everyone in the realm # except unsubscribed guest users return public_stream_user_ids(stream) else: # for a private stream, it's subscribers plus realm admins. return private_stream_user_ids( stream.id) | {user.id for user in stream.realm.get_admin_users_and_bots()} def private_stream_user_ids(stream_id: int) -> Set[int]: # TODO: Find similar queries elsewhere and de-duplicate this code. subscriptions = get_active_subscriptions_for_stream_id(stream_id) return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')} def public_stream_user_ids(stream: Stream) -> Set[int]: guest_subscriptions = get_active_subscriptions_for_stream_id( stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST) guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')} return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]: is_private_bot = ( user_profile.default_sending_stream and user_profile.default_sending_stream.invite_only or user_profile.default_events_register_stream and user_profile.default_events_register_stream.invite_only) if is_private_bot: return {user_profile.bot_owner_id} else: users = {user.id for user in user_profile.realm.get_human_admin_users()} users.add(user_profile.bot_owner_id) return users def realm_user_count(realm: Realm) -> int: return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count() def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]: human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0, UserProfile.ROLE_REALM_OWNER: 0, UserProfile.ROLE_MEMBER: 0, UserProfile.ROLE_GUEST: 0} for value_dict in list(UserProfile.objects.filter( realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role'))): human_counts[value_dict['role']] = value_dict['role__count'] bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count() return { RealmAuditLog.ROLE_COUNT_HUMANS: human_counts, RealmAuditLog.ROLE_COUNT_BOTS: bot_count, } def get_signups_stream(realm: Realm) -> Stream: # This one-liner helps us work around a lint rule. 
return get_stream("signups", realm) def notify_new_user(user_profile: UserProfile) -> None: sender_email = settings.NOTIFICATION_BOT sender = get_system_bot(sender_email) user_count = realm_user_count(user_profile.realm) signup_notifications_stream = user_profile.realm.get_signup_notifications_stream() # Send notification to realm signup notifications stream if it exists # Don't send notification for the first user in a realm if signup_notifications_stream is not None and user_count > 1: with override_language(user_profile.realm.default_language): message = _("{user} just signed up for Zulip. (total: {user_count})").format( user=f"@_**{user_profile.full_name}|{user_profile.id}**", user_count=user_count ) internal_send_stream_message( user_profile.realm, sender, signup_notifications_stream, _("signups"), message ) # We also send a notification to the Zulip administrative realm admin_realm = sender.realm try: # Check whether the stream exists signups_stream = get_signups_stream(admin_realm) with override_language(admin_realm.default_language): # We intentionally use the same strings as above to avoid translation burden. message = _("{user} just signed up for Zulip. (total: {user_count})").format( user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count ) internal_send_stream_message( admin_realm, sender, signups_stream, user_profile.realm.display_subdomain, message ) except Stream.DoesNotExist: # If the signups stream hasn't been created in the admin # realm, don't auto-create it to send to it; just do nothing. pass def notify_invites_changed(user_profile: UserProfile) -> None: event = dict(type="invites_changed") admin_ids = [user.id for user in user_profile.realm.get_admin_users_and_bots()] send_event(user_profile.realm, event, admin_ids) def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None: """Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public streams, so you have something to look at in your home view once you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES are marked unread. """ one_week_ago = timezone_now() - datetime.timedelta(weeks=1) recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only] recent_messages = Message.objects.filter(recipient_id__in=recipient_ids, date_sent__gt=one_week_ago).order_by("-id") message_ids_to_use = list(reversed(recent_messages.values_list( 'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES])) if len(message_ids_to_use) == 0: return # Handle the race condition where a message arrives between # bulk_add_subscriptions above and the Message query just above already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use, user_profile=user_profile).values_list("message_id", flat=True)) # Mark the newest ONBOARDING_UNREAD_MESSAGES as unread. 
marked_unread = 0 ums_to_create = [] for message_id in reversed(message_ids_to_use): if message_id in already_ids: continue um = UserMessage(user_profile=user_profile, message_id=message_id) if marked_unread < ONBOARDING_UNREAD_MESSAGES: marked_unread += 1 else: um.flags = UserMessage.flags.read ums_to_create.append(um) UserMessage.objects.bulk_create(reversed(ums_to_create)) # Does the processing for a new user account: # * Subscribes to default/invitation streams # * Fills in some recent historical messages # * Notifies other users in realm and Zulip about the signup # * Deactivates PreregistrationUser objects # * subscribe the user to newsletter if newsletter_data is specified def process_new_human_user(user_profile: UserProfile, prereg_user: Optional[PreregistrationUser]=None, newsletter_data: Optional[Mapping[str, str]]=None, default_stream_groups: Sequence[DefaultStreamGroup]=[], realm_creation: bool=False) -> None: mit_beta_user = user_profile.realm.is_zephyr_mirror_realm if prereg_user is not None: prereg_user.status = confirmation_settings.STATUS_ACTIVE prereg_user.save(update_fields=['status']) streams = prereg_user.streams.all() acting_user: Optional[UserProfile] = prereg_user.referred_by else: streams = [] acting_user = None # If the user's invitation didn't explicitly list some streams, we # add the default streams if len(streams) == 0: streams = get_default_subs(user_profile) for default_stream_group in default_stream_groups: default_stream_group_streams = default_stream_group.streams.all() for stream in default_stream_group_streams: if stream not in streams: streams.append(stream) bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user) add_new_user_history(user_profile, streams) # mit_beta_users don't have a referred_by field if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None: # This is a cross-realm private message. with override_language(prereg_user.referred_by.default_language): internal_send_private_message( user_profile.realm, get_system_bot(settings.NOTIFICATION_BOT), prereg_user.referred_by, _("{user} accepted your invitation to join Zulip!").format(user=f"{user_profile.full_name} <`{user_profile.email}`>") ) # Mark any other PreregistrationUsers that are STATUS_ACTIVE as # inactive so we can keep track of the PreregistrationUser we # actually used for analytics if prereg_user is not None: PreregistrationUser.objects.filter( email__iexact=user_profile.delivery_email).exclude(id=prereg_user.id)\ .update(status=confirmation_settings.STATUS_REVOKED) if prereg_user.referred_by is not None: notify_invites_changed(user_profile) else: PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email)\ .update(status=confirmation_settings.STATUS_REVOKED) notify_new_user(user_profile) # Clear any scheduled invitation emails to prevent them # from being sent after the user is created. clear_scheduled_invitation_emails(user_profile.delivery_email) if user_profile.realm.send_welcome_emails: enqueue_welcome_emails(user_profile, realm_creation) # We have an import loop here; it's intentional, because we want # to keep all the onboarding code in zerver/lib/onboarding.py. 
from zerver.lib.onboarding import send_initial_pms send_initial_pms(user_profile) if newsletter_data is not None: # If the user was created automatically via the API, we may # not want to register them for the newsletter queue_json_publish( "signups", { 'email_address': user_profile.delivery_email, 'user_id': user_profile.id, 'merge_fields': { 'NAME': user_profile.full_name, 'REALM_ID': user_profile.realm_id, 'OPTIN_IP': newsletter_data["IP"], 'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)), }, }, lambda event: None) def notify_created_user(user_profile: UserProfile) -> None: user_row = user_profile_to_user_row(user_profile) person = format_user_row(user_profile.realm, user_profile, user_row, # Since we don't know what the client # supports at this point in the code, we # just assume client_gravatar and # user_avatar_url_field_optional = False :( client_gravatar=False, user_avatar_url_field_optional=False, # We assume there's no custom profile # field data for a new user; initial # values are expected to be added in a # later event. custom_profile_field_data={}) event: Dict[str, Any] = dict(type="realm_user", op="add", person=person) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]: def stream_name(stream: Optional[Stream]) -> Optional[str]: if not stream: return None return stream.name default_sending_stream_name = stream_name(user_profile.default_sending_stream) default_events_register_stream_name = stream_name(user_profile.default_events_register_stream) bot = dict(email=user_profile.email, user_id=user_profile.id, full_name=user_profile.full_name, bot_type=user_profile.bot_type, is_active=user_profile.is_active, api_key=get_api_key(user_profile), default_sending_stream=default_sending_stream_name, default_events_register_stream=default_events_register_stream_name, default_all_public_streams=user_profile.default_all_public_streams, avatar_url=avatar_url(user_profile), services = get_service_dicts_for_bot(user_profile.id), ) # Set the owner key only when the bot has an owner. # The default bots don't have an owner. So don't # set the owner key while reactivating them. 
if user_profile.bot_owner is not None: bot['owner_id'] = user_profile.bot_owner.id return dict(type="realm_bot", op="add", bot=bot) def notify_created_bot(user_profile: UserProfile) -> None: event = created_bot_event(user_profile) send_event(user_profile.realm, event, bot_owner_user_ids(user_profile)) def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None: user_set = set() for full_name, email in name_list: user_set.add((email, full_name, True)) bulk_create_users(realm, user_set, bot_type) def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str, bot_type: Optional[int]=None, role: Optional[int]=None, bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None, timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR, default_sending_stream: Optional[Stream]=None, default_events_register_stream: Optional[Stream]=None, default_all_public_streams: Optional[bool]=None, prereg_user: Optional[PreregistrationUser]=None, newsletter_data: Optional[Dict[str, str]]=None, default_stream_groups: Sequence[DefaultStreamGroup]=[], source_profile: Optional[UserProfile]=None, realm_creation: bool=False, acting_user: Optional[UserProfile]=None) -> UserProfile: user_profile = create_user(email=email, password=password, realm=realm, full_name=full_name, role=role, bot_type=bot_type, bot_owner=bot_owner, tos_version=tos_version, timezone=timezone, avatar_source=avatar_source, default_sending_stream=default_sending_stream, default_events_register_stream=default_events_register_stream, default_all_public_streams=default_all_public_streams, source_profile=source_profile) event_time = user_profile.date_joined if not acting_user: acting_user = user_profile RealmAuditLog.objects.create( realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_CREATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) # Note that for bots, the caller will send an additional event # with bot-specific info like services. 
notify_created_user(user_profile) if bot_type is None: process_new_human_user(user_profile, prereg_user=prereg_user, newsletter_data=newsletter_data, default_stream_groups=default_stream_groups, realm_creation=realm_creation) return user_profile def do_activate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None: user_profile.is_active = True user_profile.is_mirror_dummy = False user_profile.set_unusable_password() user_profile.date_joined = timezone_now() user_profile.tos_version = settings.TOS_VERSION user_profile.save(update_fields=["is_active", "date_joined", "password", "is_mirror_dummy", "tos_version"]) event_time = user_profile.date_joined RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) notify_created_user(user_profile) def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None: # Unlike do_activate_user, this is meant for re-activating existing users, # so it doesn't reset their password, etc. user_profile.is_active = True user_profile.save(update_fields=["is_active"]) event_time = timezone_now() RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) notify_created_user(user_profile) if user_profile.is_bot: notify_created_bot(user_profile) def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]: return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False) def do_set_realm_property(realm: Realm, name: str, value: Any, acting_user: Optional[UserProfile] = None) -> None: """Takes in a realm object, the name of an attribute to update, the value to update and and the user who initiated the update. """ property_type = Realm.property_types[name] assert isinstance(value, property_type), ( f'Cannot update {name}: {value} is not an instance of {property_type}') old_value = getattr(realm, name) setattr(realm, name, value) realm.save(update_fields=[name]) event = dict( type='realm', op='update', property=name, value=value, ) send_event(realm, event, active_user_ids(realm.id)) event_time = timezone_now() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=event_time, acting_user=acting_user, extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: {'property': name, 'value': old_value}, RealmAuditLog.NEW_VALUE: {'property': name, 'value': value} })) if name == "email_address_visibility": if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]: # We use real email addresses on UserProfile.email only if # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so # changes between values that will not require changing # that field, so we can save work and return here. 
return user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False) for user_profile in user_profiles: user_profile.email = get_display_email_address(user_profile, realm) # TODO: Design a bulk event for this or force-reload all clients send_user_email_update_event(user_profile) UserProfile.objects.bulk_update(user_profiles, ['email']) for user_profile in user_profiles: flush_user_profile(sender=UserProfile, instance=user_profile) def do_set_realm_authentication_methods(realm: Realm, authentication_methods: Dict[str, bool], acting_user: Optional[UserProfile]=None) -> None: old_value = realm.authentication_methods_dict() for key, value in list(authentication_methods.items()): index = getattr(realm.authentication_methods, key).number realm.authentication_methods.set_bit(index, int(value)) realm.save(update_fields=['authentication_methods']) updated_value = realm.authentication_methods_dict() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=timezone_now(), acting_user=acting_user, extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: {'property': 'authentication_methods', 'value': old_value}, RealmAuditLog.NEW_VALUE: {'property': 'authentication_methods', 'value': updated_value} })) event = dict( type="realm", op="update_dict", property='default', data=dict(authentication_methods=updated_value), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_message_editing(realm: Realm, allow_message_editing: bool, message_content_edit_limit_seconds: int, allow_community_topic_editing: bool) -> None: realm.allow_message_editing = allow_message_editing realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds realm.allow_community_topic_editing = allow_community_topic_editing realm.save(update_fields=['allow_message_editing', 'allow_community_topic_editing', 'message_content_edit_limit_seconds', ], ) event = dict( type="realm", op="update_dict", property="default", data=dict(allow_message_editing=allow_message_editing, message_content_edit_limit_seconds=message_content_edit_limit_seconds, allow_community_topic_editing=allow_community_topic_editing), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_message_deleting(realm: Realm, message_content_delete_limit_seconds: int) -> None: realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds realm.save(update_fields=['message_content_delete_limit_seconds']) event = dict( type="realm", op="update_dict", property="default", data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None: realm.notifications_stream = stream realm.save(update_fields=['notifications_stream']) event = dict( type="realm", op="update", property="notifications_stream_id", value=stream_id, ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_signup_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None: realm.signup_notifications_stream = stream realm.save(update_fields=['signup_notifications_stream']) event = dict( type="realm", op="update", property="signup_notifications_stream_id", value=stream_id, ) send_event(realm, event, active_user_ids(realm.id)) def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None: """ Deactivate this realm. 
Do NOT deactivate the users -- we need to be able to tell the difference between users that were intentionally deactivated, e.g. by a realm admin, and users who can't currently use Zulip because their realm has been deactivated. """ if realm.deactivated: return realm.deactivated = True realm.save(update_fields=["deactivated"]) if settings.BILLING_ENABLED: downgrade_now(realm) event_time = timezone_now() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time, acting_user=acting_user, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm), })) ScheduledEmail.objects.filter(realm=realm).delete() for user in active_humans_in_realm(realm): # Don't deactivate the users, but do delete their sessions so they get # bumped to the login screen, where they'll get a realm deactivation # notice when they try to log in. delete_user_sessions(user) event = dict(type="realm", op="deactivated", realm_id=realm.id) send_event(realm, event, active_user_ids(realm.id)) def do_reactivate_realm(realm: Realm) -> None: realm.deactivated = False realm.save(update_fields=["deactivated"]) event_time = timezone_now() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm), })) def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None: realm.string_id = new_subdomain realm.save(update_fields=["string_id"]) def do_scrub_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None: users = UserProfile.objects.filter(realm=realm) for user in users: do_delete_messages_by_sender(user) do_delete_avatar_image(user, acting_user=acting_user) user.full_name = f"Scrubbed {generate_key()[:15]}" scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}" user.email = scrubbed_email user.delivery_email = scrubbed_email user.save(update_fields=["full_name", "email", "delivery_email"]) do_remove_realm_custom_profile_fields(realm) Attachment.objects.filter(realm=realm).delete() RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(), acting_user=acting_user, event_type=RealmAuditLog.REALM_SCRUBBED) def do_deactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None, _cascade: bool=True) -> None: if not user_profile.is_active: return if user_profile.realm.is_zephyr_mirror_realm: # nocoverage # For zephyr mirror users, we need to make them a mirror dummy # again; otherwise, other users won't get the correct behavior # when trying to send messages to this person inside Zulip. # # Ideally, we need to also ensure their zephyr mirroring bot # isn't running, but that's a separate issue. 
user_profile.is_mirror_dummy = True user_profile.is_active = False user_profile.save(update_fields=["is_active"]) delete_user_sessions(user_profile) clear_scheduled_emails([user_profile.id]) event_time = timezone_now() RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time, increment=-1) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) event = dict(type="realm_user", op="remove", person=dict(user_id=user_profile.id, full_name=user_profile.full_name)) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) if user_profile.is_bot: event = dict(type="realm_bot", op="remove", bot=dict(user_id=user_profile.id, full_name=user_profile.full_name)) send_event(user_profile.realm, event, bot_owner_user_ids(user_profile)) if _cascade: bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile) for profile in bot_profiles: do_deactivate_user(profile, acting_user=acting_user, _cascade=False) def do_deactivate_stream(stream: Stream, log: bool=True, acting_user: Optional[UserProfile]=None) -> None: # Get the affected user ids *before* we deactivate everybody. affected_user_ids = can_access_stream_user_ids(stream) get_active_subscriptions_for_stream_id(stream.id).update(active=False) was_invite_only = stream.invite_only stream.deactivated = True stream.invite_only = True # Preserve as much as possible the original stream name while giving it a # special prefix that both indicates that the stream is deactivated and # frees up the original name for reuse. old_name = stream.name new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH] for i in range(20): if stream_name_in_use(new_name, stream.realm_id): # This stream has already been deactivated, keep prepending !s until # we have a unique stream name or you've hit a rename limit. new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH] else: break # If you don't have a unique name at this point, this will fail later in the # code path. stream.name = new_name[:Stream.MAX_NAME_LENGTH] stream.save(update_fields=['name', 'deactivated', 'invite_only']) # If this is a default stream, remove it, properly sending a # notification to browser clients. if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists(): do_remove_default_stream(stream) default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id) for group in default_stream_groups_for_stream: do_remove_streams_from_default_stream_group(stream.realm, group, [stream]) # Remove the old stream information from remote cache. 
old_cache_key = get_stream_cache_key(old_name, stream.realm_id) cache_delete(old_cache_key) stream_dict = stream.to_dict() stream_dict.update(dict(name=old_name, invite_only=was_invite_only)) event = dict(type="stream", op="delete", streams=[stream_dict]) send_event(stream.realm, event, affected_user_ids) event_time = timezone_now() RealmAuditLog.objects.create(realm=stream.realm, acting_user=acting_user, modified_stream=stream, event_type=RealmAuditLog.STREAM_DEACTIVATED, event_time=event_time) def send_user_email_update_event(user_profile: UserProfile) -> None: payload = dict(user_id=user_profile.id, new_email=user_profile.email) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None: delete_user_profile_caches([user_profile]) user_profile.delivery_email = new_email if user_profile.email_address_is_realm_public(): user_profile.email = new_email user_profile.save(update_fields=["email", "delivery_email"]) else: user_profile.save(update_fields=["delivery_email"]) # We notify just the target user (and eventually org admins, only # when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS) # about their new delivery email, since that field is private. payload = dict(user_id=user_profile.id, delivery_email=new_email) event = dict(type='realm_user', op='update', person=payload) send_event(user_profile.realm, event, [user_profile.id]) if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR: # If the user is using Gravatar to manage their email address, # their Gravatar just changed, and we need to notify other # clients. notify_avatar_url_change(user_profile) if user_profile.email_address_is_realm_public(): # Additionally, if we're also changing the publicly visible # email, we send a new_email event as well. 
send_user_email_update_event(user_profile) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED, event_time=event_time) def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None: old_email = user_profile.delivery_email obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email, user_profile=user_profile, realm=user_profile.realm) activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE) from zerver.context_processors import common_context context = common_context(user_profile) context.update({ 'old_email': old_email, 'new_email': new_email, 'activate_url': activation_url, }) language = user_profile.default_language send_email('zerver/emails/confirm_new_email', to_emails=[new_email], from_name=FromAddress.security_email_from_name(language=language), from_address=FromAddress.tokenized_no_reply_address(), language=language, context=context, realm=user_profile.realm) def compute_irc_user_fullname(email: str) -> str: return email.split("@")[0] + " (IRC)" def compute_jabber_user_fullname(email: str) -> str: return email.split("@")[0] + " (XMPP)" @cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email), timeout=3600*24*7) def create_mirror_user_if_needed(realm: Realm, email: str, email_to_fullname: Callable[[str], str]) -> UserProfile: try: return get_user_by_delivery_email(email, realm) except UserProfile.DoesNotExist: try: # Forge a user for this person return create_user( email=email, password=None, realm=realm, full_name=email_to_fullname(email), active=False, is_mirror_dummy=True, ) except IntegrityError: return get_user_by_delivery_email(email, realm) def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None: welcome_bot = get_system_bot(settings.WELCOME_BOT) human_recipient_id = message['message'].sender.recipient_id if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2: content = ( _("Congratulations on your first reply!") + " " ":tada:" "\n" "\n" + _("Feel free to continue using this space to practice your new messaging " "skills. 
Or, try clicking on some of the stream names to your left!") ) internal_send_private_message( message['realm'], welcome_bot, message['message'].sender, content) def render_incoming_message(message: Message, content: str, user_ids: Set[int], realm: Realm, mention_data: Optional[MentionData]=None, email_gateway: bool=False) -> str: realm_alert_words_automaton = get_alert_word_automaton(realm) try: rendered_content = render_markdown( message=message, content=content, realm=realm, realm_alert_words_automaton = realm_alert_words_automaton, mention_data=mention_data, email_gateway=email_gateway, ) except MarkdownRenderingException: raise JsonableError(_('Unable to render message')) return rendered_content class RecipientInfoResult(TypedDict): active_user_ids: Set[int] push_notify_user_ids: Set[int] stream_email_user_ids: Set[int] stream_push_user_ids: Set[int] wildcard_mention_user_ids: Set[int] um_eligible_user_ids: Set[int] long_term_idle_user_ids: Set[int] default_bot_user_ids: Set[int] service_bot_tuples: List[Tuple[int, int]] def get_recipient_info(recipient: Recipient, sender_id: int, stream_topic: Optional[StreamTopicTarget], possibly_mentioned_user_ids: AbstractSet[int]=set(), possible_wildcard_mention: bool=True) -> RecipientInfoResult: stream_push_user_ids: Set[int] = set() stream_email_user_ids: Set[int] = set() wildcard_mention_user_ids: Set[int] = set() if recipient.type == Recipient.PERSONAL: # The sender and recipient may be the same id, so # de-duplicate using a set. message_to_user_ids = list({recipient.type_id, sender_id}) assert(len(message_to_user_ids) in [1, 2]) elif recipient.type == Recipient.STREAM: # Anybody calling us w/r/t a stream message needs to supply # stream_topic. We may eventually want to have different versions # of this function for different message types. assert(stream_topic is not None) user_ids_muting_topic = stream_topic.user_ids_muting_topic() subscription_rows = stream_topic.get_active_subscriptions().annotate( user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'), user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'), user_profile_wildcard_mentions_notify=F( 'user_profile__wildcard_mentions_notify'), ).values( 'user_profile_id', 'push_notifications', 'email_notifications', 'wildcard_mentions_notify', 'user_profile_email_notifications', 'user_profile_push_notifications', 'user_profile_wildcard_mentions_notify', 'is_muted', ).order_by('user_profile_id') message_to_user_ids = [ row['user_profile_id'] for row in subscription_rows ] def should_send(setting: str, row: Dict[str, Any]) -> bool: # This implements the structure that the UserProfile stream notification settings # are defaults, which can be overridden by the stream-level settings (if those # values are not null). 
if row['is_muted']: return False if row['user_profile_id'] in user_ids_muting_topic: return False if row[setting] is not None: return row[setting] return row['user_profile_' + setting] stream_push_user_ids = { row['user_profile_id'] for row in subscription_rows # Note: muting a stream overrides stream_push_notify if should_send('push_notifications', row) } stream_email_user_ids = { row['user_profile_id'] for row in subscription_rows # Note: muting a stream overrides stream_email_notify if should_send('email_notifications', row) } if possible_wildcard_mention: # If there's a possible wildcard mention, we need to # determine which users would receive a wildcard mention # notification for this message should the message indeed # contain a wildcard mention. # # We don't have separate values for push/email # notifications here; at this stage, we're just # determining whether this wildcard mention should be # treated as a mention (and follow the user's mention # notification preferences) or a normal message. wildcard_mention_user_ids = { row['user_profile_id'] for row in subscription_rows if should_send("wildcard_mentions_notify", row) } elif recipient.type == Recipient.HUDDLE: message_to_user_ids = get_huddle_user_ids(recipient) else: raise ValueError('Bad recipient type') message_to_user_id_set = set(message_to_user_ids) user_ids = set(message_to_user_id_set) # Important note: Because we haven't rendered markdown yet, we # don't yet know which of these possibly-mentioned users was # actually mentioned in the message (in other words, the # mention syntax might have been in a code block or otherwise # escaped). `get_ids_for` will filter these extra user rows # for our data structures not related to bots user_ids |= possibly_mentioned_user_ids if user_ids: query = UserProfile.objects.filter( is_active=True, ).values( 'id', 'enable_online_push_notifications', 'is_bot', 'bot_type', 'long_term_idle', ) # query_for_ids is fast highly optimized for large queries, and we # need this codepath to be fast (it's part of sending messages) query = query_for_ids( query=query, user_ids=sorted(list(user_ids)), field='id', ) rows = list(query) else: # TODO: We should always have at least one user_id as a recipient # of any message we send. Right now the exception to this # rule is `notify_new_user`, which, at least in a possibly # contrived test scenario, can attempt to send messages # to an inactive bot. When we plug that hole, we can avoid # this `else` clause and just `assert(user_ids)`. # # UPDATE: It's February 2020 (and a couple years after the above # comment was written). We have simplified notify_new_user # so that it should be a little easier to reason about. # There is currently some cleanup to how we handle cross # realm bots that is still under development. Once that # effort is complete, we should be able to address this # to-do. rows = [] def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]: """Only includes users on the explicit message to line""" return { row['id'] for row in rows if f(row) } & message_to_user_id_set def is_service_bot(row: Dict[str, Any]) -> bool: return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES) active_user_ids = get_ids_for(lambda r: True) push_notify_user_ids = get_ids_for( lambda r: r['enable_online_push_notifications'], ) # Service bots don't get UserMessage rows. 
um_eligible_user_ids = get_ids_for( lambda r: not is_service_bot(r), ) long_term_idle_user_ids = get_ids_for( lambda r: r['long_term_idle'], ) # These two bot data structures need to filter from the full set # of users who either are receiving the message or might have been # mentioned in it, and so can't use get_ids_for. # # Further in the do_send_messages code path, once # `mentioned_user_ids` has been computed via markdown, we'll filter # these data structures for just those users who are either a # direct recipient or were mentioned; for now, we're just making # sure we have the data we need for that without extra database # queries. default_bot_user_ids = { row['id'] for row in rows if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT } service_bot_tuples = [ (row['id'], row['bot_type']) for row in rows if is_service_bot(row) ] info: RecipientInfoResult = dict( active_user_ids=active_user_ids, push_notify_user_ids=push_notify_user_ids, stream_push_user_ids=stream_push_user_ids, stream_email_user_ids=stream_email_user_ids, wildcard_mention_user_ids=wildcard_mention_user_ids, um_eligible_user_ids=um_eligible_user_ids, long_term_idle_user_ids=long_term_idle_user_ids, default_bot_user_ids=default_bot_user_ids, service_bot_tuples=service_bot_tuples, ) return info def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]], mentioned_user_ids: Set[int], active_user_ids: Set[int], recipient_type: int) -> Dict[str, List[Dict[str, Any]]]: event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list) # Avoid infinite loops by preventing messages sent by bots from generating # Service events. if sender.is_bot: return event_dict def maybe_add_event(user_profile_id: int, bot_type: int) -> None: if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: queue_name = 'outgoing_webhooks' elif bot_type == UserProfile.EMBEDDED_BOT: queue_name = 'embedded_bots' else: logging.error( 'Unexpected bot_type for Service bot id=%s: %s', user_profile_id, bot_type, ) return is_stream = (recipient_type == Recipient.STREAM) # Important note: service_bot_tuples may contain service bots # who were not actually mentioned in the message (e.g. if # mention syntax for that bot appeared in a code block). # Thus, it is important to filter any users who aren't part of # either mentioned_user_ids (the actual mentioned users) or # active_user_ids (the actual recipients). # # So even though this is implied by the logic below, we filter # these not-actually-mentioned users here, to help keep this # function future-proof. 
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids: return # Mention triggers, for stream messages if is_stream and user_profile_id in mentioned_user_ids: trigger = 'mention' # PM triggers for personal and huddle messages elif (not is_stream) and (user_profile_id in active_user_ids): trigger = 'private_message' else: return event_dict[queue_name].append({ 'trigger': trigger, 'user_profile_id': user_profile_id, }) for user_profile_id, bot_type in service_bot_tuples: maybe_add_event( user_profile_id=user_profile_id, bot_type=bot_type, ) return event_dict def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]: scheduled_messages: List[ScheduledMessage] = [] for message in messages: scheduled_message = ScheduledMessage() scheduled_message.sender = message['message'].sender scheduled_message.recipient = message['message'].recipient topic_name = message['message'].topic_name() scheduled_message.set_topic_name(topic_name=topic_name) scheduled_message.content = message['message'].content scheduled_message.sending_client = message['message'].sending_client scheduled_message.stream = message['stream'] scheduled_message.realm = message['realm'] scheduled_message.scheduled_timestamp = message['deliver_at'] if message['delivery_type'] == 'send_later': scheduled_message.delivery_type = ScheduledMessage.SEND_LATER elif message['delivery_type'] == 'remind': scheduled_message.delivery_type = ScheduledMessage.REMIND scheduled_messages.append(scheduled_message) ScheduledMessage.objects.bulk_create(scheduled_messages) return [scheduled_message.id for scheduled_message in scheduled_messages] def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]], email_gateway: bool=False, mark_as_read: Sequence[int]=[]) -> List[int]: """See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html for high-level documentation on this subsystem. 
""" # Filter out messages which didn't pass internal_prep_message properly messages = [message for message in messages_maybe_none if message is not None] # Filter out zephyr mirror anomalies where the message was already sent already_sent_ids: List[int] = [] new_messages: List[MutableMapping[str, Any]] = [] for message in messages: if isinstance(message['message'], int): already_sent_ids.append(message['message']) else: new_messages.append(message) messages = new_messages links_for_embed: Set[str] = set() # For consistency, changes to the default values for these gets should also be applied # to the default args in do_send_message for message in messages: message['rendered_content'] = message.get('rendered_content', None) message['stream'] = message.get('stream', None) message['local_id'] = message.get('local_id', None) message['sender_queue_id'] = message.get('sender_queue_id', None) message['realm'] = message.get('realm', message['message'].sender.realm) mention_data = MentionData( realm_id=message['realm'].id, content=message['message'].content, ) message['mention_data'] = mention_data if message['message'].is_stream_message(): stream_id = message['message'].recipient.type_id stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget( stream_id=stream_id, topic_name=message['message'].topic_name(), ) else: stream_topic = None info = get_recipient_info( recipient=message['message'].recipient, sender_id=message['message'].sender_id, stream_topic=stream_topic, possibly_mentioned_user_ids=mention_data.get_user_ids(), possible_wildcard_mention=mention_data.message_has_wildcards(), ) message['active_user_ids'] = info['active_user_ids'] message['push_notify_user_ids'] = info['push_notify_user_ids'] message['stream_push_user_ids'] = info['stream_push_user_ids'] message['stream_email_user_ids'] = info['stream_email_user_ids'] message['um_eligible_user_ids'] = info['um_eligible_user_ids'] message['long_term_idle_user_ids'] = info['long_term_idle_user_ids'] message['default_bot_user_ids'] = info['default_bot_user_ids'] message['service_bot_tuples'] = info['service_bot_tuples'] # Render our messages. assert message['message'].rendered_content is None rendered_content = render_incoming_message( message['message'], message['message'].content, message['active_user_ids'], message['realm'], mention_data=message['mention_data'], email_gateway=email_gateway, ) message['message'].rendered_content = rendered_content message['message'].rendered_content_version = markdown_version links_for_embed |= message['message'].links_for_preview # Add members of the mentioned user groups into `mentions_user_ids`. for group_id in message['message'].mentions_user_group_ids: members = message['mention_data'].get_group_members(group_id) message['message'].mentions_user_ids.update(members) # Only send data to Tornado about wildcard mentions if message # rendering determined the message had an actual wildcard # mention in it (and not e.g. wildcard mention syntax inside a # code block). if message['message'].mentions_wildcard: message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids'] else: message['wildcard_mention_user_ids'] = [] ''' Once we have the actual list of mentioned ids from message rendering, we can patch in "default bots" (aka normal bots) who were directly mentioned in this message as eligible to get UserMessage rows. 
''' mentioned_user_ids = message['message'].mentions_user_ids default_bot_user_ids = message['default_bot_user_ids'] mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids message['um_eligible_user_ids'] |= mentioned_bot_user_ids # Save the message receipts in the database user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict) with transaction.atomic(): Message.objects.bulk_create([message['message'] for message in messages]) # Claim attachments in message for message in messages: if do_claim_attachments(message['message'], message['message'].potential_attachment_path_ids): message['message'].has_attachment = True message['message'].save(update_fields=['has_attachment']) ums: List[UserMessageLite] = [] for message in messages: # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows; # they will be processed later. mentioned_user_ids = message['message'].mentions_user_ids user_messages = create_user_messages( message=message['message'], um_eligible_user_ids=message['um_eligible_user_ids'], long_term_idle_user_ids=message['long_term_idle_user_ids'], stream_push_user_ids = message['stream_push_user_ids'], stream_email_user_ids = message['stream_email_user_ids'], mentioned_user_ids=mentioned_user_ids, mark_as_read=mark_as_read, ) for um in user_messages: user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list() ums.extend(user_messages) message['message'].service_queue_events = get_service_bot_events( sender=message['message'].sender, service_bot_tuples=message['service_bot_tuples'], mentioned_user_ids=mentioned_user_ids, active_user_ids=message['active_user_ids'], recipient_type=message['message'].recipient.type, ) bulk_insert_ums(ums) for message in messages: do_widget_post_save_actions(message) for message in messages: realm_id: Optional[int] = None if message['message'].is_stream_message(): if message['stream'] is None: stream_id = message['message'].recipient.type_id message['stream'] = Stream.objects.select_related().get(id=stream_id) assert message['stream'] is not None # assert needed because stubs for django are missing realm_id = message['stream'].realm_id # Deliver events to the real-time push system, as well as # enqueuing any additional processing triggered by the message. wide_message_dict = MessageDict.wide_dict(message['message'], realm_id) user_flags = user_message_flags.get(message['message'].id, {}) sender = message['message'].sender message_type = wide_message_dict['type'] presence_idle_user_ids = get_active_presence_idle_user_ids( realm=sender.realm, sender_id=sender.id, message_type=message_type, active_user_ids=message['active_user_ids'], user_flags=user_flags, ) event = dict( type='message', message=message['message'].id, message_dict=wide_message_dict, presence_idle_user_ids=presence_idle_user_ids, ) ''' TODO: We may want to limit user_ids to only those users who have UserMessage rows, if only for minor performance reasons. For now we queue events for all subscribers/sendees of the message, since downstream code may still do notifications that don't require UserMessage rows. Our automated tests have gotten better on this codepath, but we may have coverage gaps, so we should be careful about changing the next line. 
''' user_ids = message['active_user_ids'] | set(user_flags.keys()) users = [ dict( id=user_id, flags=user_flags.get(user_id, []), always_push_notify=(user_id in message['push_notify_user_ids']), stream_push_notify=(user_id in message['stream_push_user_ids']), stream_email_notify=(user_id in message['stream_email_user_ids']), wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']), ) for user_id in user_ids ] if message['message'].is_stream_message(): # Note: This is where authorization for single-stream # get_updates happens! We only attach stream data to the # notify new_message request if it's a public stream, # ensuring that in the tornado server, non-public stream # messages are only associated to their subscribed users. assert message['stream'] is not None # assert needed because stubs for django are missing if message['stream'].is_public(): event['realm_id'] = message['stream'].realm_id event['stream_name'] = message['stream'].name if message['stream'].invite_only: event['invite_only'] = True if message['stream'].first_message_id is None: message['stream'].first_message_id = message['message'].id message['stream'].save(update_fields=["first_message_id"]) if message['local_id'] is not None: event['local_id'] = message['local_id'] if message['sender_queue_id'] is not None: event['sender_queue_id'] = message['sender_queue_id'] send_event(message['realm'], event, users) if links_for_embed: event_data = { 'message_id': message['message'].id, 'message_content': message['message'].content, 'message_realm_id': message['realm'].id, 'urls': links_for_embed} queue_json_publish('embed_links', event_data) if message['message'].recipient.type == Recipient.PERSONAL: welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id if (welcome_bot_id in message['active_user_ids'] and welcome_bot_id != message['message'].sender_id): send_welcome_bot_response(message) for queue_name, events in message['message'].service_queue_events.items(): for event in events: queue_json_publish( queue_name, { "message": wide_message_dict, "trigger": event['trigger'], "user_profile_id": event["user_profile_id"], }, ) # Note that this does not preserve the order of message ids # returned. In practice, this shouldn't matter, as we only # mirror single zephyr messages at a time and don't otherwise # intermingle sending zephyr messages with other messages. return already_sent_ids + [message['message'].id for message in messages] class UserMessageLite: ''' The Django ORM is too slow for bulk operations. This class is optimized for the simple use case of inserting a bunch of rows into zerver_usermessage. 
''' def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None: self.user_profile_id = user_profile_id self.message_id = message_id self.flags = flags def flags_list(self) -> List[str]: return UserMessage.flags_list_for_flags(self.flags) def create_user_messages(message: Message, um_eligible_user_ids: AbstractSet[int], long_term_idle_user_ids: AbstractSet[int], stream_push_user_ids: AbstractSet[int], stream_email_user_ids: AbstractSet[int], mentioned_user_ids: AbstractSet[int], mark_as_read: Sequence[int] = []) -> List[UserMessageLite]: ums_to_create = [] for user_profile_id in um_eligible_user_ids: um = UserMessageLite( user_profile_id=user_profile_id, message_id=message.id, flags=0, ) ums_to_create.append(um) # These properties on the Message are set via # render_markdown by code in the markdown inline patterns wildcard = message.mentions_wildcard ids_with_alert_words = message.user_ids_with_alert_words for um in ums_to_create: if (um.user_profile_id == message.sender.id and message.sent_by_human()) or \ um.user_profile_id in mark_as_read: um.flags |= UserMessage.flags.read if wildcard: um.flags |= UserMessage.flags.wildcard_mentioned if um.user_profile_id in mentioned_user_ids: um.flags |= UserMessage.flags.mentioned if um.user_profile_id in ids_with_alert_words: um.flags |= UserMessage.flags.has_alert_word if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]: um.flags |= UserMessage.flags.is_private # For long_term_idle (aka soft-deactivated) users, we are allowed # to optimize by lazily not creating UserMessage rows that would # have the default 0 flag set (since the soft-reactivation logic # knows how to create those when the user comes back). We need to # create the UserMessage rows for these long_term_idle users # non-lazily in a few cases: # # * There are nonzero flags (e.g. the user was mentioned), since # that case is rare and this saves a lot of complexity in # soft-reactivation. # # * If the user is going to be notified (e.g. they get push/email # notifications for every message on a stream), since in that # case the notifications code will call `access_message` on the # message to re-verify permissions, and for private streams, # will get an error if the UserMessage row doesn't exist yet. # # See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation # for details on this system. user_messages = [] for um in ums_to_create: if (um.user_profile_id in long_term_idle_user_ids and um.user_profile_id not in stream_push_user_ids and um.user_profile_id not in stream_email_user_ids and message.is_stream_message() and int(um.flags) == 0): continue user_messages.append(um) return user_messages def bulk_insert_ums(ums: List[UserMessageLite]) -> None: ''' Doing bulk inserts this way is much faster than using Django, since we don't have any ORM overhead. Profiling with 1000 users shows a speedup of 0.436 -> 0.027 seconds, so we're talking about a 15x speedup. 
''' if not ums: return vals = [ (um.user_profile_id, um.message_id, um.flags) for um in ums ] query = SQL(''' INSERT into zerver_usermessage (user_profile_id, message_id, flags) VALUES %s ''') with connection.cursor() as cursor: execute_values(cursor.cursor, query, vals) def do_add_submessage(realm: Realm, sender_id: int, message_id: int, msg_type: str, content: str, ) -> None: submessage = SubMessage( sender_id=sender_id, message_id=message_id, msg_type=msg_type, content=content, ) submessage.save() event = dict( type="submessage", msg_type=msg_type, message_id=message_id, submessage_id=submessage.id, sender_id=sender_id, content=content, ) ums = UserMessage.objects.filter(message_id=message_id) target_user_ids = [um.user_profile_id for um in ums] send_event(realm, event, target_user_ids) def notify_reaction_update(user_profile: UserProfile, message: Message, reaction: Reaction, op: str) -> None: user_dict = {'user_id': user_profile.id, 'email': user_profile.email, 'full_name': user_profile.full_name} event: Dict[str, Any] = { 'type': 'reaction', 'op': op, 'user_id': user_profile.id, # TODO: We plan to remove this redundant user_dict object once # clients are updated to support accessing use user_id. See # https://github.com/zulip/zulip/pull/14711 for details. 'user': user_dict, 'message_id': message.id, 'emoji_name': reaction.emoji_name, 'emoji_code': reaction.emoji_code, 'reaction_type': reaction.reaction_type, } # Update the cached message since new reaction is added. update_to_dict_cache([message]) # Recipients for message update events, including reactions, are # everyone who got the original message. This means reactions # won't live-update in preview narrows, but it's the right # performance tradeoff, since otherwise we'd need to send all # reactions to public stream messages to every browser for every # client in the organization, which doesn't scale. # # However, to ensure that reactions do live-update for any user # who has actually participated in reacting to a message, we add a # "historical" UserMessage row for any user who reacts to message, # subscribing them to future notifications. ums = UserMessage.objects.filter(message=message.id) send_event(user_profile.realm, event, [um.user_profile_id for um in ums]) def do_add_reaction(user_profile: UserProfile, message: Message, emoji_name: str, emoji_code: str, reaction_type: str) -> None: reaction = Reaction(user_profile=user_profile, message=message, emoji_name=emoji_name, emoji_code=emoji_code, reaction_type=reaction_type) try: reaction.save() except django.db.utils.IntegrityError: # nocoverage # This can happen when a race results in the check in views # code not catching an attempt to double-add a reaction, or # perhaps if the emoji_name/emoji_code mapping is busted. 
raise JsonableError(_("Reaction already exists.")) notify_reaction_update(user_profile, message, reaction, "add") def do_remove_reaction(user_profile: UserProfile, message: Message, emoji_code: str, reaction_type: str) -> None: reaction = Reaction.objects.filter(user_profile=user_profile, message=message, emoji_code=emoji_code, reaction_type=reaction_type).get() reaction.delete() notify_reaction_update(user_profile, message, reaction, "remove") def do_send_typing_notification( realm: Realm, sender: UserProfile, recipient_user_profiles: List[UserProfile], operator: str) -> None: sender_dict = {'user_id': sender.id, 'email': sender.email} # Include a list of recipients in the event body to help identify where the typing is happening recipient_dicts = [{'user_id': profile.id, 'email': profile.email} for profile in recipient_user_profiles] event = dict( type='typing', op=operator, sender=sender_dict, recipients=recipient_dicts, ) # Only deliver the notification to active user recipients user_ids_to_notify = [ user.id for user in recipient_user_profiles if user.is_active ] send_event(realm, event, user_ids_to_notify) # check_send_typing_notification: # Checks the typing notification and sends it def check_send_typing_notification(sender: UserProfile, user_ids: List[int], operator: str) -> None: realm = sender.realm if len(user_ids) == 0: raise JsonableError(_('Missing parameter: \'to\' (recipient)')) elif operator not in ('start', 'stop'): raise JsonableError(_('Invalid \'op\' value (should be start or stop)')) ''' The next chunk of code will go away when we upgrade old mobile users away from versions of mobile that send emails. For the small number of very outdated mobile clients, we do double work here in terms of fetching users, but this structure reduces lots of other unnecessary duplicated code and will make it convenient to mostly delete code when we desupport old versions of the app.''' if sender.id not in user_ids: user_ids.append(sender.id) # If any of the user_ids being sent in are invalid, we will # just reject the whole request, since a partial list of user_ids # can create confusion related to huddles. Plus it's a good # sign that a client is confused (or possibly even malicious) if # we get bad user_ids. user_profiles = [] for user_id in user_ids: try: # We include cross-bot realms as possible recipients, # so that clients can know which huddle conversation # is relevant here. user_profile = get_user_by_id_in_realm_including_cross_realm( user_id, sender.realm) except UserProfile.DoesNotExist: raise JsonableError(_("Invalid user ID {}").format(user_id)) user_profiles.append(user_profile) do_send_typing_notification( realm=realm, sender=sender, recipient_user_profiles=user_profiles, operator=operator, ) def ensure_stream(realm: Realm, stream_name: str, invite_only: bool=False, stream_description: str="", acting_user: Optional[UserProfile]=None) -> Stream: return create_stream_if_needed(realm, stream_name, invite_only=invite_only, stream_description=stream_description, acting_user=acting_user)[0] def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile], forwarded_mirror_message: bool, forwarder_user_profile: Optional[UserProfile], sender: UserProfile) -> Recipient: # Avoid mutating the passed in list of recipient_profiles. 
recipient_profiles_map = {} for user_profile in recipient_profiles: recipient_profiles_map[user_profile.id] = user_profile if forwarded_mirror_message: # In our mirroring integrations with some third-party # protocols, bots subscribed to the third-party protocol # forward to Zulip messages that they received in the # third-party service. The permissions model for that # forwarding is that users can only submit to Zulip private # messages they personally received, and here we do the check # for whether forwarder_user_profile is among the private # message recipients of the message. assert forwarder_user_profile is not None if forwarder_user_profile.id not in recipient_profiles_map: raise ValidationError(_("User not authorized for this query")) # If the private message is just between the sender and # another person, force it to be a personal internally if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map): del recipient_profiles_map[sender.id] assert len(recipient_profiles_map) != 0 if len(recipient_profiles_map) == 1: user_profile = list(recipient_profiles_map.values())[0] return user_profile.recipient # Otherwise, we need a huddle. Make sure the sender is included in huddle messages recipient_profiles_map[sender.id] = sender user_ids: Set[int] = {user_id for user_id in recipient_profiles_map} return get_huddle_recipient(user_ids) def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile], sender: UserProfile, allow_deactivated: bool=False) -> Sequence[UserProfile]: recipient_profiles_map: Dict[int, UserProfile] = {} # We exempt cross-realm bots from the check that all the recipients # are in the same realm. realms = set() if not is_cross_realm_bot_email(sender.email): realms.add(sender.realm_id) for user_profile in user_profiles: if (not user_profile.is_active and not user_profile.is_mirror_dummy and not allow_deactivated) or user_profile.realm.deactivated: raise ValidationError(_("'{email}' is no longer using Zulip.").format(email=user_profile.email)) recipient_profiles_map[user_profile.id] = user_profile if not is_cross_realm_bot_email(user_profile.email): realms.add(user_profile.realm_id) if len(realms) > 1: raise ValidationError(_("You can't send private messages outside of your organization.")) return list(recipient_profiles_map.values()) def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool, forwarder_user_profile: Optional[UserProfile], sender: UserProfile, allow_deactivated: bool=False) -> Recipient: recipient_profiles = validate_recipient_user_profiles(user_profiles, sender, allow_deactivated=allow_deactivated) return get_recipient_from_user_profiles(recipient_profiles, forwarded_mirror_message, forwarder_user_profile, sender) def already_sent_mirrored_message_id(message: Message) -> Optional[int]: if message.recipient.type == Recipient.HUDDLE: # For huddle messages, we use a 10-second window because the # timestamps aren't guaranteed to actually match between two # copies of the same message. 
time_window = datetime.timedelta(seconds=10) else: time_window = datetime.timedelta(seconds=0) query = Message.objects.filter( sender=message.sender, recipient=message.recipient, content=message.content, sending_client=message.sending_client, date_sent__gte=message.date_sent - time_window, date_sent__lte=message.date_sent + time_window) messages = filter_by_exact_message_topic( query=query, message=message, ) if messages.exists(): return messages[0].id return None def extract_stream_indicator(s: str) -> Union[str, int]: # Users can pass stream name as either an id or a name, # and if they choose to pass a name, they may JSON encode # it for legacy reasons. try: data = ujson.loads(s) except (ValueError, TypeError): # If there was no JSON encoding, then we just # have a raw stream name. return s # We should stop supporting this odd use case # once we improve our documentation. if isinstance(data, list): if len(data) != 1: # nocoverage raise JsonableError(_("Expected exactly one stream")) data = data[0] if isinstance(data, str): # We had a JSON-encoded stream name. return data if isinstance(data, int): # We had a stream id. return data raise JsonableError(_("Invalid data type for stream")) def extract_private_recipients(s: str) -> Union[List[str], List[int]]: # We try to accept multiple incoming formats for recipients. # See test_extract_recipients() for examples of what we allow. try: data = ujson.loads(s) except (ValueError, TypeError): data = s if isinstance(data, str): data = data.split(',') if not isinstance(data, list): raise JsonableError(_("Invalid data type for recipients")) if not data: # We don't complain about empty message recipients here return data if isinstance(data[0], str): return get_validated_emails(data) if not isinstance(data[0], int): raise JsonableError(_("Invalid data type for recipients")) return get_validated_user_ids(data) def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]: for user_id in user_ids: if not isinstance(user_id, int): raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both.")) return list(set(user_ids)) def get_validated_emails(emails: Iterable[str]) -> List[str]: for email in emails: if not isinstance(email, str): raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both.")) return list(filter(bool, {email.strip() for email in emails})) def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str, topic: str, body: str, realm: Optional[Realm]=None) -> int: addressee = Addressee.for_stream_name(stream_name, topic) message = check_message(sender, client, addressee, body, realm) return do_send_messages([message])[0] def check_send_private_message(sender: UserProfile, client: Client, receiving_user: UserProfile, body: str) -> int: addressee = Addressee.for_user_profile(receiving_user) message = check_message(sender, client, addressee, body) return do_send_messages([message])[0] # check_send_message: # Returns the id of the sent message. Has same argspec as check_message. 
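# A hypothetical call, to illustrate the argspec (names and values are made up,
# not taken from this codebase):
#
#     message_id = check_send_message(
#         sender=user_profile,
#         client=request_client,
#         message_type_name="stream",
#         message_to=["general"],
#         topic_name="greetings",
#         message_content="hello world",
#     )
#
# check_send_stream_message and check_send_private_message above are thin
# wrappers that build the Addressee for the two common cases.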
def check_send_message(sender: UserProfile, client: Client, message_type_name: str, message_to: Union[Sequence[int], Sequence[str]], topic_name: Optional[str], message_content: str, realm: Optional[Realm]=None, forged: bool=False, forged_timestamp: Optional[float]=None, forwarder_user_profile: Optional[UserProfile]=None, local_id: Optional[str]=None, sender_queue_id: Optional[str]=None, widget_content: Optional[str]=None) -> int: addressee = Addressee.legacy_build( sender, message_type_name, message_to, topic_name) message = check_message(sender, client, addressee, message_content, realm, forged, forged_timestamp, forwarder_user_profile, local_id, sender_queue_id, widget_content) return do_send_messages([message])[0] def check_schedule_message(sender: UserProfile, client: Client, message_type_name: str, message_to: Union[Sequence[str], Sequence[int]], topic_name: Optional[str], message_content: str, delivery_type: str, deliver_at: datetime.datetime, realm: Optional[Realm]=None, forwarder_user_profile: Optional[UserProfile]=None, ) -> int: addressee = Addressee.legacy_build( sender, message_type_name, message_to, topic_name) message = check_message(sender, client, addressee, message_content, realm=realm, forwarder_user_profile=forwarder_user_profile) message['deliver_at'] = deliver_at message['delivery_type'] = delivery_type recipient = message['message'].recipient if (delivery_type == 'remind' and (recipient.type != Recipient.STREAM and recipient.type_id != sender.id)): raise JsonableError(_("Reminders can only be set for streams.")) return do_schedule_messages([message])[0] def check_default_stream_group_name(group_name: str) -> None: if group_name.strip() == "": raise JsonableError(_("Invalid default stream group name '{}'").format(group_name)) if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH: raise JsonableError(_("Default stream group name too long (limit: {} characters)").format( DefaultStreamGroup.MAX_NAME_LENGTH, )) for i in group_name: if ord(i) == 0: raise JsonableError(_("Default stream group name '{}' contains NULL (0x00) characters.").format( group_name, )) def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile, realm: Realm, content: str) -> None: """ Sends a PM error notification to a bot's owner if one hasn't already been sent in the last 5 minutes. """ if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated: return if not sender.is_bot or sender.bot_owner is None: return # Don't send these notifications for cross-realm bot messages # (e.g. from EMAIL_GATEWAY_BOT) since the owner for # EMAIL_GATEWAY_BOT is probably the server administrator, not # the owner of the bot who could potentially fix the problem. if sender.realm != realm: return # We warn the user once every 5 minutes to avoid a flood of # PMs on a misconfigured integration, re-using the # UserProfile.last_reminder field, which is not used for bots. 
last_reminder = sender.last_reminder waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD) if last_reminder and timezone_now() - last_reminder <= waitperiod: return internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT), sender.bot_owner, content) sender.last_reminder = timezone_now() sender.save(update_fields=['last_reminder']) def send_pm_if_empty_stream(stream: Optional[Stream], realm: Realm, sender: UserProfile, stream_name: Optional[str]=None, stream_id: Optional[int]=None) -> None: """If a bot sends a message to a stream that doesn't exist or has no subscribers, sends a notification to the bot owner (if not a cross-realm bot) so that the owner can correct the issue.""" if not sender.is_bot or sender.bot_owner is None: return arg_dict = { "bot_identity": f"`{sender.delivery_email}`", "stream_id": stream_id, "stream_name": f"#**{stream_name}**", "new_stream_link": "#streams/new", } if sender.bot_owner is not None: with override_language(sender.bot_owner.default_language): if stream is None: if stream_id is not None: content = _("Your bot {bot_identity} tried to send a message to stream ID " "{stream_id}, but there is no stream with that ID.").format(**arg_dict) else: assert(stream_name is not None) content = _("Your bot {bot_identity} tried to send a message to stream " "{stream_name}, but that stream does not exist. " "Click [here]({new_stream_link}) to create it.").format(**arg_dict) else: if num_subscribers_for_stream_id(stream.id) > 0: return content = _("Your bot {bot_identity} tried to send a message to " "stream {stream_name}. The stream exists but " "does not have any subscribers.").format(**arg_dict) send_rate_limited_pm_notification_to_bot_owner(sender, realm, content) def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm, sender: UserProfile) -> Stream: stream_name = stream_name.strip() check_stream_name(stream_name) try: stream = get_stream(stream_name, realm) send_pm_if_empty_stream(stream, realm, sender) except Stream.DoesNotExist: send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name) raise StreamDoesNotExistError(escape(stream_name)) return stream def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm, sender: UserProfile) -> Stream: try: stream = get_stream_by_id_in_realm(stream_id, realm) send_pm_if_empty_stream(stream, realm, sender) except Stream.DoesNotExist: send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id) raise StreamWithIDDoesNotExistError(stream_id) return stream def check_private_message_policy(realm: Realm, sender: UserProfile, user_profiles: Sequence[UserProfile]) -> None: if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED: if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot): # We allow PMs only between users and bots, to avoid # breaking the tutorial as well as automated # notifications from system bots to users. return raise JsonableError(_("Private messages are disabled in this organization.")) # check_message: # Returns message ready for sending with do_send_message on success or the error message (string) on error. 
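# On success, the dict returned below has roughly this shape (see the return
# statement at the end of check_message):
#
#     {'message': message, 'stream': stream, 'local_id': local_id,
#      'sender_queue_id': sender_queue_id, 'realm': realm,
#      'widget_content': widget_content}
#
# which is exactly the structure that do_send_messages consumes.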
def check_message(sender: UserProfile, client: Client, addressee: Addressee, message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False, forged_timestamp: Optional[float]=None, forwarder_user_profile: Optional[UserProfile]=None, local_id: Optional[str]=None, sender_queue_id: Optional[str]=None, widget_content: Optional[str]=None) -> Dict[str, Any]: """See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html for high-level documentation on this subsystem. """ stream = None message_content = message_content_raw.rstrip() if len(message_content) == 0: raise JsonableError(_("Message must not be empty")) if '\x00' in message_content: raise JsonableError(_("Message must not contain null bytes")) message_content = truncate_body(message_content) if realm is None: realm = sender.realm if addressee.is_stream(): topic_name = addressee.topic() topic_name = truncate_topic(topic_name) stream_name = addressee.stream_name() stream_id = addressee.stream_id() if stream_name is not None: stream = validate_stream_name_with_pm_notification(stream_name, realm, sender) elif stream_id is not None: stream = validate_stream_id_with_pm_notification(stream_id, realm, sender) else: stream = addressee.stream() assert stream is not None recipient = stream.recipient # This will raise JsonableError if there are problems. if sender.bot_type != sender.OUTGOING_WEBHOOK_BOT: access_stream_for_send_message( sender=sender, stream=stream, forwarder_user_profile=forwarder_user_profile) elif addressee.is_private(): user_profiles = addressee.user_profiles() mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"] check_private_message_policy(realm, sender, user_profiles) # API Super-users who set the `forged` flag are allowed to # forge messages sent by any user, so we disable the # `forwarded_mirror_message` security check in that case. forwarded_mirror_message = mirror_message and not forged try: recipient = recipient_for_user_profiles(user_profiles, forwarded_mirror_message, forwarder_user_profile, sender) except ValidationError as e: assert isinstance(e.messages[0], str) raise JsonableError(e.messages[0]) else: # This is defensive code--Addressee already validates # the message type. raise AssertionError("Invalid message type") message = Message() message.sender = sender message.content = message_content message.recipient = recipient if addressee.is_stream(): message.set_topic_name(topic_name) if forged and forged_timestamp is not None: # Forged messages come with a timestamp message.date_sent = timestamp_to_datetime(forged_timestamp) else: message.date_sent = timezone_now() message.sending_client = client # We render messages later in the process. 
assert message.rendered_content is None if client.name == "zephyr_mirror": id = already_sent_mirrored_message_id(message) if id is not None: return {'message': id} if widget_content is not None: try: widget_content = ujson.loads(widget_content) except Exception: raise JsonableError(_('Widgets: API programmer sent invalid JSON content')) try: check_widget_content(widget_content) except ValidationError as error: raise JsonableError(_('Widgets: {error_msg}').format( error_msg=error.message, )) return {'message': message, 'stream': stream, 'local_id': local_id, 'sender_queue_id': sender_queue_id, 'realm': realm, 'widget_content': widget_content} # MASKED: _internal_prep_message function (lines 2385-2415) def internal_prep_stream_message( realm: Realm, sender: UserProfile, stream: Stream, topic: str, content: str, ) -> Optional[Dict[str, Any]]: """ See _internal_prep_message for details of how this works. """ addressee = Addressee.for_stream(stream, topic) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_prep_stream_message_by_name( realm: Realm, sender: UserProfile, stream_name: str, topic: str, content: str, ) -> Optional[Dict[str, Any]]: """ See _internal_prep_message for details of how this works. """ addressee = Addressee.for_stream_name(stream_name, topic) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_prep_private_message(realm: Realm, sender: UserProfile, recipient_user: UserProfile, content: str) -> Optional[Dict[str, Any]]: """ See _internal_prep_message for details of how this works. """ addressee = Addressee.for_user_profile(recipient_user) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_send_private_message(realm: Realm, sender: UserProfile, recipient_user: UserProfile, content: str) -> Optional[int]: message = internal_prep_private_message(realm, sender, recipient_user, content) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def internal_send_stream_message( realm: Realm, sender: UserProfile, stream: Stream, topic: str, content: str, email_gateway: bool=False) -> Optional[int]: message = internal_prep_stream_message( realm, sender, stream, topic, content, ) if message is None: return None message_ids = do_send_messages([message], email_gateway=email_gateway) return message_ids[0] def internal_send_stream_message_by_name( realm: Realm, sender: UserProfile, stream_name: str, topic: str, content: str, ) -> Optional[int]: message = internal_prep_stream_message_by_name( realm, sender, stream_name, topic, content, ) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str], content: str) -> Optional[int]: addressee = Addressee.for_private(emails, realm) message = _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str: # These colors are shared with the palette in subs.js. 
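    # For example (hypothetical sizes): with a 6-color palette and 8 colors already
    # in use, no unused color remains, so the fallback below wraps around and reuses
    # the color at index 8 % 6 == 2.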
used_colors = [sub.color for sub in subs if sub.active] available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors] if available_colors: return available_colors[0] else: return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)] def validate_user_access_to_subscribers(user_profile: Optional[UserProfile], stream: Stream) -> None: """ Validates whether the user can view the subscribers of a stream. Raises a JsonableError if: * The user and the stream are in different realms * The realm is MIT and the stream is not invite only. * The stream is invite only, requesting_user is passed, and that user does not subscribe to the stream. """ validate_user_access_to_subscribers_helper( user_profile, {"realm_id": stream.realm_id, "invite_only": stream.invite_only}, # We use a lambda here so that we only compute whether the # user is subscribed if we have to lambda user_profile: subscribed_to_stream(user_profile, stream.id)) def validate_user_access_to_subscribers_helper( user_profile: Optional[UserProfile], stream_dict: Mapping[str, Any], check_user_subscribed: Callable[[UserProfile], bool], ) -> None: """Helper for validate_user_access_to_subscribers that doesn't require a full stream object. This function is a bit hard to read, because it is carefully optimized for performance in the two code paths we call it from: * In `bulk_get_subscriber_user_ids`, we already know whether the user was subscribed via `sub_dict`, and so we want to avoid a database query at all (especially since it calls this in a loop); * In `validate_user_access_to_subscribers`, we want to only check if the user is subscribed when we absolutely have to, since it costs a database query. The `check_user_subscribed` argument is a function that reports whether the user is subscribed to the stream. Note also that we raise a ValidationError in cases where the caller is doing the wrong thing (maybe these should be AssertionErrors), and JsonableError for 400 type errors. """ if user_profile is None: raise ValidationError("Missing user to validate access for") if user_profile.realm_id != stream_dict["realm_id"]: raise ValidationError("Requesting user not in given realm") # Guest users can access subscribed public stream's subscribers if user_profile.is_guest: if check_user_subscribed(user_profile): return # We could put an AssertionError here; in that we don't have # any code paths that would allow a guest user to access other # streams in the first place. if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]: raise JsonableError(_("Subscriber data is not available for this stream")) # Organization administrators can view subscribers for all streams. 
if user_profile.is_realm_admin: return if (stream_dict["invite_only"] and not check_user_subscribed(user_profile)): raise JsonableError(_("Unable to retrieve subscribers for private stream")) def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]], user_profile: UserProfile, sub_dict: Mapping[int, bool], stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]: """sub_dict maps stream_id => whether the user is subscribed to that stream.""" target_stream_dicts = [] for stream_dict in stream_dicts: stream_recipient.populate_with(stream_id=stream_dict["id"], recipient_id=stream_dict["recipient_id"]) try: validate_user_access_to_subscribers_helper( user_profile, stream_dict, lambda user_profile: sub_dict[stream_dict["id"]], ) except JsonableError: continue target_stream_dicts.append(stream_dict) stream_ids = [stream['id'] for stream in target_stream_dicts] recipient_ids = sorted([ stream_recipient.recipient_id_for(stream_id) for stream_id in stream_ids ]) result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts} if not recipient_ids: return result ''' The raw SQL below leads to more than a 2x speedup when tested with 20k+ total subscribers. (For large realms with lots of default streams, this function deals with LOTS of data, so it is important to optimize.) ''' query = SQL(''' SELECT zerver_subscription.recipient_id, zerver_subscription.user_profile_id FROM zerver_subscription INNER JOIN zerver_userprofile ON zerver_userprofile.id = zerver_subscription.user_profile_id WHERE zerver_subscription.recipient_id in %(recipient_ids)s AND zerver_subscription.active AND zerver_userprofile.is_active ORDER BY zerver_subscription.recipient_id, zerver_subscription.user_profile_id ''') cursor = connection.cursor() cursor.execute(query, {"recipient_ids": tuple(recipient_ids)}) rows = cursor.fetchall() cursor.close() recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict() ''' Using groupby/itemgetter here is important for performance, at scale. It makes it so that all interpreter overhead is just O(N) in nature. ''' for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)): user_profile_ids = [r[1] for r in recip_rows] stream_id = recip_to_stream_id[recip_id] result[stream_id] = list(user_profile_ids) return result def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet: # TODO: Make a generic stub for QuerySet """ Build a query to get the subscribers list for a stream, raising a JsonableError if: 'realm' is optional in stream. The caller can refine this query with select_related(), values(), etc. depending on whether it wants objects or just certain fields """ validate_user_access_to_subscribers(requesting_user, stream) # Note that non-active users may still have "active" subscriptions, because we # want to be able to easily reactivate them with their old subscriptions. This # is why the query here has to look at the UserProfile.is_active flag. 
subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter( user_profile__is_active=True, ) return subscriptions def get_subscriber_emails(stream: Stream, requesting_user: Optional[UserProfile]=None) -> List[str]: subscriptions_query = get_subscribers_query(stream, requesting_user) subscriptions = subscriptions_query.values('user_profile__email') return [subscription['user_profile__email'] for subscription in subscriptions] def notify_subscriptions_added(user_profile: UserProfile, sub_pairs: Iterable[Tuple[Subscription, Stream]], stream_user_ids: Callable[[Stream], List[int]], recent_traffic: Dict[int, int], no_log: bool=False) -> None: if not no_log: log_event({'type': 'subscription_added', 'user': user_profile.email, 'names': [stream.name for sub, stream in sub_pairs], 'realm': user_profile.realm.string_id}) sub_dicts = [] for (subscription, stream) in sub_pairs: sub_dict = stream.to_dict() for field_name in Subscription.API_FIELDS: if field_name == "active": # Skip the "active" field, it's implied by context continue sub_dict[field_name] = getattr(subscription, field_name) sub_dict['in_home_view'] = not subscription.is_muted sub_dict['email_address'] = encode_email_address(stream, show_sender=True) sub_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream.id, stream.date_created, recent_traffic) sub_dict['subscribers'] = stream_user_ids(stream) sub_dicts.append(sub_dict) # Send a notification to the user who subscribed. event = dict(type="subscription", op="add", subscriptions=sub_dicts) send_event(user_profile.realm, event, [user_profile.id]) def get_peer_user_ids_for_stream_change(stream: Stream, altered_user_ids: Iterable[int], subscribed_user_ids: Iterable[int]) -> Set[int]: ''' altered_user_ids is the user_ids that we are adding/removing subscribed_user_ids is the already-subscribed user_ids Based on stream policy, we notify the correct bystanders, while not notifying altered_users (who get subscribers via another event) ''' if stream.invite_only: # PRIVATE STREAMS # Realm admins can access all private stream subscribers. Send them an # event even if they aren't subscribed to stream. realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()] user_ids_to_notify = [] user_ids_to_notify.extend(realm_admin_ids) user_ids_to_notify.extend(subscribed_user_ids) return set(user_ids_to_notify) - set(altered_user_ids) else: # PUBLIC STREAMS # We now do "peer_add" or "peer_remove" events even for streams # users were never subscribed to, in order for the neversubscribed # structure to stay up-to-date. return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids) def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]: stream_ids = [stream.id for stream in streams] all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter( user_profile__is_active=True, ).values( 'recipient__type_id', 'user_profile_id', ).order_by( 'recipient__type_id', ) get_stream_id = itemgetter('recipient__type_id') all_subscribers_by_stream: Dict[int, List[int]] = defaultdict(list) for stream_id, rows in itertools.groupby(all_subs, get_stream_id): user_ids = [row['user_profile_id'] for row in rows] all_subscribers_by_stream[stream_id] = user_ids return all_subscribers_by_stream def get_last_message_id() -> int: # We generally use this function to populate RealmAuditLog, and # the max id here is actually systemwide, not per-realm. I # assume there's some advantage in not filtering by realm. 
last_id = Message.objects.aggregate(Max('id'))['id__max'] if last_id is None: # During initial realm creation, there might be 0 messages in # the database; in that case, the `aggregate` query returns # None. Since we want an int for "beginning of time", use -1. last_id = -1 return last_id SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]] def bulk_add_subscriptions(streams: Iterable[Stream], users: Iterable[UserProfile], color_map: Mapping[str, str]={}, from_stream_creation: bool=False, acting_user: Optional[UserProfile]=None) -> SubT: users = list(users) recipients_map: Dict[int, int] = {stream.id: stream.recipient_id for stream in streams} recipient_ids: List[int] = [recipient_id for recipient_id in recipients_map.values()] stream_map: Dict[int, Stream] = {} for stream in streams: stream_map[recipients_map[stream.id]] = stream subs_by_user: Dict[int, List[Subscription]] = defaultdict(list) all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile') for sub in all_subs_query: subs_by_user[sub.user_profile_id].append(sub) realm = users[0].realm already_subscribed: List[Tuple[UserProfile, Stream]] = [] subs_to_activate: List[Tuple[Subscription, Stream]] = [] new_subs: List[Tuple[UserProfile, int, Stream]] = [] for user_profile in users: needs_new_sub: Set[int] = set(recipient_ids) for sub in subs_by_user[user_profile.id]: if sub.recipient_id in needs_new_sub: needs_new_sub.remove(sub.recipient_id) if sub.active: already_subscribed.append((user_profile, stream_map[sub.recipient_id])) else: subs_to_activate.append((sub, stream_map[sub.recipient_id])) # Mark the sub as active, without saving, so that # pick_color will consider this to be an active # subscription when picking colors sub.active = True for recipient_id in needs_new_sub: new_subs.append((user_profile, recipient_id, stream_map[recipient_id])) subs_to_add: List[Tuple[Subscription, Stream]] = [] for (user_profile, recipient_id, stream) in new_subs: if stream.name in color_map: color = color_map[stream.name] else: color = pick_color(user_profile, subs_by_user[user_profile.id]) sub_to_add = Subscription(user_profile=user_profile, active=True, color=color, recipient_id=recipient_id) subs_by_user[user_profile.id].append(sub_to_add) subs_to_add.append((sub_to_add, stream)) # TODO: XXX: This transaction really needs to be done at the serializeable # transaction isolation level. 
with transaction.atomic(): occupied_streams_before = list(get_occupied_streams(realm)) Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add]) sub_ids = [sub.id for (sub, stream) in subs_to_activate] Subscription.objects.filter(id__in=sub_ids).update(active=True) occupied_streams_after = list(get_occupied_streams(realm)) # Log Subscription Activities in RealmAuditLog event_time = timezone_now() event_last_message_id = get_last_message_id() all_subscription_logs: (List[RealmAuditLog]) = [] for (sub, stream) in subs_to_add: all_subscription_logs.append(RealmAuditLog(realm=realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_CREATED, event_time=event_time)) for (sub, stream) in subs_to_activate: all_subscription_logs.append(RealmAuditLog(realm=realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED, event_time=event_time)) # Now since we have all log objects generated we can do a bulk insert RealmAuditLog.objects.bulk_create(all_subscription_logs) new_occupied_streams = [stream for stream in set(occupied_streams_after) - set(occupied_streams_before) if not stream.invite_only] if new_occupied_streams and not from_stream_creation: event: Dict[str, object] = dict( type="stream", op="occupy", streams=[stream.to_dict() for stream in new_occupied_streams], ) send_event(realm, event, active_user_ids(realm.id)) # Notify all existing users on streams that users have joined # First, get all users subscribed to the streams that we care about # We fetch all subscription information upfront, as it's used throughout # the following code and we want to minize DB queries all_subscribers_by_stream = get_user_ids_for_streams(streams=streams) def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]: if stream.is_in_zephyr_realm and not stream.invite_only: return [] user_ids = all_subscribers_by_stream[stream.id] return user_ids sub_tuples_by_user: Dict[int, List[Tuple[Subscription, Stream]]] = defaultdict(list) new_streams: Set[Tuple[int, int]] = set() for (sub, stream) in subs_to_add + subs_to_activate: sub_tuples_by_user[sub.user_profile.id].append((sub, stream)) new_streams.add((sub.user_profile.id, stream.id)) # We now send several types of events to notify browsers. The # first batch is notifications to users on invite-only streams # that the stream exists. for stream in streams: if not stream.is_public(): # Users newly added to invite-only streams # need a `create` notification. The former, because # they need the stream to exist before # they get the "subscribe" notification, and the latter so # they can manage the new stream. # Realm admins already have all created private streams. realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()] new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and user.id not in realm_admin_ids] send_stream_creation_event(stream, new_users_ids) stream_ids = {stream.id for stream in streams} recent_traffic = get_streams_traffic(stream_ids=stream_ids) # The second batch is events for the users themselves that they # were subscribed to the new streams. 
for user_profile in users: if len(sub_tuples_by_user[user_profile.id]) == 0: continue sub_pairs = sub_tuples_by_user[user_profile.id] notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids, recent_traffic) # The second batch is events for other users who are tracking the # subscribers lists of streams in their browser; everyone for # public streams and only existing subscribers for private streams. for stream in streams: if stream.is_in_zephyr_realm and not stream.invite_only: continue new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams] subscribed_user_ids = all_subscribers_by_stream[stream.id] peer_user_ids = get_peer_user_ids_for_stream_change( stream=stream, altered_user_ids=new_user_ids, subscribed_user_ids=subscribed_user_ids, ) if peer_user_ids: for new_user_id in new_user_ids: event = dict(type="subscription", op="peer_add", stream_id=stream.id, user_id=new_user_id) send_event(realm, event, peer_user_ids) return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] + [(sub.user_profile, stream) for (sub, stream) in subs_to_activate], already_subscribed) def get_available_notification_sounds() -> List[str]: notification_sounds_path = static_path('audio/notification_sounds') available_notification_sounds = [] for file_name in os.listdir(notification_sounds_path): root, ext = os.path.splitext(file_name) if '.' in root: # nocoverage # Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming) # to avoid spurious duplicates. continue if ext == '.ogg': available_notification_sounds.append(root) return available_notification_sounds def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream], no_log: bool=False) -> None: if not no_log: log_event({'type': 'subscription_removed', 'user': user_profile.email, 'names': [stream.name for stream in streams], 'realm': user_profile.realm.string_id}) payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams] event = dict(type="subscription", op="remove", subscriptions=payload) send_event(user_profile.realm, event, [user_profile.id]) SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]] def bulk_remove_subscriptions(users: Iterable[UserProfile], streams: Iterable[Stream], acting_client: Client, acting_user: Optional[UserProfile]=None) -> SubAndRemovedT: users = list(users) streams = list(streams) stream_dict = {stream.id: stream for stream in streams} existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict) def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]: stream_ids = {stream.id for stream in streams} not_subscribed: List[Tuple[UserProfile, Stream]] = [] for user_profile in users: user_sub_stream_info = existing_subs_by_user[user_profile.id] subscribed_stream_ids = { stream.id for (sub, stream) in user_sub_stream_info } not_subscribed_stream_ids = stream_ids - subscribed_stream_ids for stream_id in not_subscribed_stream_ids: stream = stream_dict[stream_id] not_subscribed.append((user_profile, stream)) return not_subscribed not_subscribed = get_non_subscribed_tups() subs_to_deactivate: List[Tuple[Subscription, Stream]] = [] sub_ids_to_deactivate: List[int] = [] # This loop just flattens out our data into big lists for # bulk operations. 
for tup_list in existing_subs_by_user.values(): for (sub, stream) in tup_list: subs_to_deactivate.append((sub, stream)) sub_ids_to_deactivate.append(sub.id) our_realm = users[0].realm # TODO: XXX: This transaction really needs to be done at the serializeable # transaction isolation level. with transaction.atomic(): occupied_streams_before = list(get_occupied_streams(our_realm)) Subscription.objects.filter( id__in=sub_ids_to_deactivate, ) .update(active=False) occupied_streams_after = list(get_occupied_streams(our_realm)) # Log Subscription Activities in RealmAuditLog event_time = timezone_now() event_last_message_id = get_last_message_id() all_subscription_logs: (List[RealmAuditLog]) = [] for (sub, stream) in subs_to_deactivate: all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED, event_time=event_time)) # Now since we have all log objects generated we can do a bulk insert RealmAuditLog.objects.bulk_create(all_subscription_logs) altered_user_dict: Dict[int, List[UserProfile]] = defaultdict(list) streams_by_user: Dict[int, List[Stream]] = defaultdict(list) for (sub, stream) in subs_to_deactivate: streams_by_user[sub.user_profile_id].append(stream) altered_user_dict[stream.id].append(sub.user_profile) for user_profile in users: if len(streams_by_user[user_profile.id]) == 0: continue notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id]) event = {'type': 'mark_stream_messages_as_read', 'client_id': acting_client.id, 'user_profile_id': user_profile.id, 'stream_ids': [stream.id for stream in streams]} queue_json_publish("deferred_work", event) all_subscribers_by_stream = get_user_ids_for_streams(streams=streams) def send_peer_remove_event(stream: Stream) -> None: if stream.is_in_zephyr_realm and not stream.invite_only: return altered_users = altered_user_dict[stream.id] altered_user_ids = [u.id for u in altered_users] subscribed_user_ids = all_subscribers_by_stream[stream.id] peer_user_ids = get_peer_user_ids_for_stream_change( stream=stream, altered_user_ids=altered_user_ids, subscribed_user_ids=subscribed_user_ids, ) if peer_user_ids: for removed_user in altered_users: event = dict(type="subscription", op="peer_remove", stream_id=stream.id, user_id=removed_user.id) send_event(our_realm, event, peer_user_ids) for stream in streams: send_peer_remove_event(stream=stream) new_vacant_streams = [stream for stream in set(occupied_streams_before) - set(occupied_streams_after)] new_vacant_private_streams = [stream for stream in new_vacant_streams if stream.invite_only] new_vacant_public_streams = [stream for stream in new_vacant_streams if not stream.invite_only] if new_vacant_public_streams: event = dict(type="stream", op="vacate", streams=[stream.to_dict() for stream in new_vacant_public_streams]) send_event(our_realm, event, active_user_ids(our_realm.id)) if new_vacant_private_streams: # Deactivate any newly-vacant private streams for stream in new_vacant_private_streams: do_deactivate_stream(stream, acting_user=acting_user) return ( [(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate], not_subscribed, ) def log_subscription_property_change(user_email: str, stream_name: str, property: str, value: Any) -> None: event = {'type': 'subscription_property', 'property': property, 'user': user_email, 'stream_name': stream_name, 'value': value} log_event(event) def 
do_change_subscription_property(user_profile: UserProfile, sub: Subscription, stream: Stream, property_name: str, value: Any, ) -> None: database_property_name = property_name event_property_name = property_name database_value = value event_value = value # For this property, is_muted is used in the database, but # in_home_view in the API, since we haven't migrated the events # API to the new name yet. if property_name == "in_home_view": database_property_name = "is_muted" database_value = not value if property_name == "is_muted": event_property_name = "in_home_view" event_value = not value setattr(sub, database_property_name, database_value) sub.save(update_fields=[database_property_name]) log_subscription_property_change(user_profile.email, stream.name, database_property_name, database_value) event = dict(type="subscription", op="update", email=user_profile.email, property=event_property_name, value=event_value, stream_id=stream.id, name=stream.name) send_event(user_profile.realm, event, [user_profile.id]) def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None: user_profile.set_password(password) if commit: user_profile.save(update_fields=["password"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED, event_time=event_time) def do_change_full_name(user_profile: UserProfile, full_name: str, acting_user: Optional[UserProfile]) -> None: old_name = user_profile.full_name user_profile.full_name = full_name user_profile.save(update_fields=["full_name"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED, event_time=event_time, extra_data=old_name) payload = dict(user_id=user_profile.id, full_name=user_profile.full_name) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=payload), bot_owner_user_ids(user_profile)) def check_change_full_name(user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile) -> str: """Verifies that the user's proposed full name is valid. The caller is responsible for checking check permissions. Returns the new full name, which may differ from what was passed in (because this function strips whitespace).""" new_full_name = check_full_name(full_name_raw) do_change_full_name(user_profile, new_full_name, acting_user) return new_full_name def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile) -> None: new_full_name = check_full_name(full_name_raw) if new_full_name == user_profile.full_name: # Our web app will try to patch full_name even if the user didn't # modify the name in the form. We just silently ignore those # situations. return check_bot_name_available( realm_id=user_profile.realm_id, full_name=new_full_name, ) do_change_full_name(user_profile, new_full_name, acting_user) def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile, acting_user: UserProfile) -> None: previous_owner = user_profile.bot_owner user_profile.bot_owner = bot_owner user_profile.save() # Can't use update_fields because of how the foreign key works. 
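# Editor-added roadmap (summary of the block below, not part of the original module):
# after recording the audit-log entry, the code sends a realm_bot "delete" event to the
# previous owner (unless they are a realm admin, whose bot list already updates via the
# "update" event), a realm_bot "add" event to the new owner (again, unless an admin), a
# realm_bot "update" event carrying the new owner_id to the remaining bot-owner
# audience, and finally a realm_user "update" event with bot_owner_id to all active
# users in the realm.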
event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED, event_time=event_time) update_users = bot_owner_user_ids(user_profile) # For admins, update event is sent instead of delete/add # event. bot_data of admin contains all the # bots and none of them should be removed/(added again). # Delete the bot from previous owner's bot data. if previous_owner and not previous_owner.is_realm_admin: send_event(user_profile.realm, dict(type='realm_bot', op="delete", bot=dict( user_id=user_profile.id, )), {previous_owner.id}) # Do not send update event for previous bot owner. update_users = update_users - {previous_owner.id} # Notify the new owner that the bot has been added. if not bot_owner.is_realm_admin: add_event = created_bot_event(user_profile) send_event(user_profile.realm, add_event, {bot_owner.id}) # Do not send update event for bot_owner. update_users = update_users - {bot_owner.id} send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, owner_id=user_profile.bot_owner.id, )), update_users) # Since `bot_owner_id` is included in the user profile dict we need # to update the users dict with the new bot owner id event: Dict[str, Any] = dict( type="realm_user", op="update", person=dict( user_id=user_profile.id, bot_owner_id=user_profile.bot_owner.id, ), ) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None: user_profile.tos_version = tos_version user_profile.save(update_fields=["tos_version"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED, event_time=event_time) def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str: old_api_key = user_profile.api_key new_api_key = generate_api_key() user_profile.api_key = new_api_key user_profile.save(update_fields=["api_key"]) # We need to explicitly delete the old API key from our caches, # because the on-save handler for flushing the UserProfile object # in zerver/lib/cache.py only has access to the new API key. cache_delete(user_profile_by_api_key_cache_key(old_api_key)) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED, event_time=event_time) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, api_key=new_api_key, )), bot_owner_user_ids(user_profile)) event = {'type': 'clear_push_device_tokens', 'user_profile_id': user_profile.id} queue_json_publish("deferred_work", event) return new_api_key def notify_avatar_url_change(user_profile: UserProfile) -> None: if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, avatar_url=avatar_url(user_profile), )), bot_owner_user_ids(user_profile)) payload = dict( avatar_source=user_profile.avatar_source, avatar_url=avatar_url(user_profile), avatar_url_medium=avatar_url(user_profile, medium=True), avatar_version=user_profile.avatar_version, # Even clients using client_gravatar don't need the email, # since we're sending the URL anyway. 
user_id=user_profile.id, ) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str, skip_notify: bool=False, acting_user: Optional[UserProfile]=None) -> None: user_profile.avatar_source = avatar_source user_profile.avatar_version += 1 user_profile.save(update_fields=["avatar_source", "avatar_version"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile, event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED, extra_data={'avatar_source': avatar_source}, event_time=event_time, acting_user=acting_user) if not skip_notify: notify_avatar_url_change(user_profile) def do_delete_avatar_image(user: UserProfile, acting_user: Optional[UserProfile]=None) -> None: do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user) delete_avatar_image(user) def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None: realm.icon_source = icon_source realm.icon_version += 1 realm.save(update_fields=["icon_source", "icon_version"]) if log: log_event({'type': 'realm_change_icon', 'realm': realm.string_id, 'icon_source': icon_source}) send_event(realm, dict(type='realm', op='update_dict', property="icon", data=dict(icon_source=realm.icon_source, icon_url=realm_icon_url(realm))), active_user_ids(realm.id)) def do_change_logo_source(realm: Realm, logo_source: str, night: bool, acting_user: Optional[UserProfile]=None) -> None: if not night: realm.logo_source = logo_source realm.logo_version += 1 realm.save(update_fields=["logo_source", "logo_version"]) else: realm.night_logo_source = logo_source realm.night_logo_version += 1 realm.save(update_fields=["night_logo_source", "night_logo_version"]) RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED, realm=realm, event_time=timezone_now(), acting_user=acting_user) event = dict(type='realm', op='update_dict', property="night_logo" if night else "logo", data=get_realm_logo_data(realm, night)) send_event(realm, event, active_user_ids(realm.id)) def do_change_plan_type(realm: Realm, plan_type: int) -> None: old_value = realm.plan_type realm.plan_type = plan_type realm.save(update_fields=['plan_type']) RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED, realm=realm, event_time=timezone_now(), extra_data={'old_value': old_value, 'new_value': plan_type}) if plan_type == Realm.STANDARD: realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX realm.message_visibility_limit = None realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD elif plan_type == Realm.SELF_HOSTED: realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter. 
realm.message_visibility_limit = None realm.upload_quota_gb = None elif plan_type == Realm.STANDARD_FREE: realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX realm.message_visibility_limit = None realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD elif plan_type == Realm.LIMITED: realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED else: raise AssertionError("Invalid plan type") update_first_visible_message_id(realm) realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb']) event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type, 'extra_data': {'upload_quota': realm.upload_quota_bytes()}} send_event(realm, event, active_user_ids(realm.id)) def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream], log: bool=True) -> None: user_profile.default_sending_stream = stream user_profile.save(update_fields=['default_sending_stream']) if log: log_event({'type': 'user_change_default_sending_stream', 'user': user_profile.email, 'stream': str(stream)}) if user_profile.is_bot: if stream: stream_name: Optional[str] = stream.name else: stream_name = None send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_sending_stream=stream_name, )), bot_owner_user_ids(user_profile)) def do_change_default_events_register_stream(user_profile: UserProfile, stream: Optional[Stream], log: bool=True) -> None: user_profile.default_events_register_stream = stream user_profile.save(update_fields=['default_events_register_stream']) if log: log_event({'type': 'user_change_default_events_register_stream', 'user': user_profile.email, 'stream': str(stream)}) if user_profile.is_bot: if stream: stream_name: Optional[str] = stream.name else: stream_name = None send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_events_register_stream=stream_name, )), bot_owner_user_ids(user_profile)) def do_change_default_all_public_streams(user_profile: UserProfile, value: bool, log: bool=True) -> None: user_profile.default_all_public_streams = value user_profile.save(update_fields=['default_all_public_streams']) if log: log_event({'type': 'user_change_default_all_public_streams', 'user': user_profile.email, 'value': str(value)}) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_all_public_streams=user_profile.default_all_public_streams, )), bot_owner_user_ids(user_profile)) def do_change_user_role(user_profile: UserProfile, value: int, acting_user: Optional[UserProfile]=None) -> None: old_value = user_profile.role user_profile.role = value user_profile.save(update_fields=["role"]) RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(), extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: old_value, RealmAuditLog.NEW_VALUE: value, RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) event = dict(type="realm_user", op="update", person=dict(user_id=user_profile.id, role=user_profile.role)) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def do_change_is_api_super_user(user_profile: UserProfile, value: bool) -> None: user_profile.is_api_super_user = value 
user_profile.save(update_fields=["is_api_super_user"]) def do_change_stream_invite_only(stream: Stream, invite_only: bool, history_public_to_subscribers: Optional[bool]=None) -> None: history_public_to_subscribers = get_default_value_for_history_public_to_subscribers( stream.realm, invite_only, history_public_to_subscribers, ) stream.invite_only = invite_only stream.history_public_to_subscribers = history_public_to_subscribers stream.save(update_fields=['invite_only', 'history_public_to_subscribers']) event = dict( op="update", type="stream", property="invite_only", value=invite_only, history_public_to_subscribers=history_public_to_subscribers, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None: stream.is_web_public = is_web_public stream.save(update_fields=['is_web_public']) def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None: stream.stream_post_policy = stream_post_policy stream.save(update_fields=['stream_post_policy']) event = dict( op="update", type="stream", property="stream_post_policy", value=stream_post_policy, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) # Backwards-compatibility code: We removed the # is_announcement_only property in early 2020, but we send a # duplicate event for legacy mobile clients that might want the # data. event = dict( op="update", type="stream", property="is_announcement_only", value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_rename_stream(stream: Stream, new_name: str, user_profile: UserProfile, log: bool=True) -> Dict[str, str]: old_name = stream.name stream.name = new_name stream.save(update_fields=["name"]) if log: log_event({'type': 'stream_name_change', 'realm': stream.realm.string_id, 'new_name': new_name}) recipient_id = stream.recipient_id messages = Message.objects.filter(recipient_id=recipient_id).only("id") # Update the display recipient and stream, which are easy single # items to set. old_cache_key = get_stream_cache_key(old_name, stream.realm_id) new_cache_key = get_stream_cache_key(stream.name, stream.realm_id) if old_cache_key != new_cache_key: cache_delete(old_cache_key) cache_set(new_cache_key, stream) cache_set(display_recipient_cache_key(recipient_id), stream.name) # Delete cache entries for everything else, which is cheaper and # clearer than trying to set them. display_recipient is the out of # date field in all cases. cache_delete_many( to_dict_cache_key_id(message.id) for message in messages) new_email = encode_email_address(stream, show_sender=True) # We will tell our users to essentially # update stream.name = new_name where name = old_name # and update stream.email = new_email where name = old_name. # We could optimize this by trying to send one message, but the # client code really wants one property update at a time, and # updating stream names is a pretty infrequent operation. # More importantly, we want to key these updates by id, not name, # since id is the immutable primary key, and obviously name is not. 
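# Editor-added sketch (hypothetical values, not part of the original module): the loop
# below sends one stream/update event per changed property, keyed by the immutable
# stream_id rather than the (mutable) name, e.g.
#
#     {'type': 'stream', 'op': 'update', 'stream_id': 42, 'name': 'old name',
#      'property': 'name', 'value': 'new name'}
#
# plus a matching event with property='email_address' and the re-encoded address.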
data_updates = [ ['email_address', new_email], ['name', new_name], ] for property, value in data_updates: event = dict( op="update", type="stream", property=property, value=value, stream_id=stream.id, name=old_name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) sender = get_system_bot(settings.NOTIFICATION_BOT) with override_language(stream.realm.default_language): internal_send_stream_message( stream.realm, sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, _('{user_name} renamed stream {old_stream_name} to {new_stream_name}.').format( user_name=f"@_**{user_profile.full_name}|{user_profile.id}**", old_stream_name=f"**{old_name}**", new_stream_name=f"**{new_name}**", ), ) # Even though the token doesn't change, the web client needs to update the # email forwarding address to display the correctly-escaped new name. return {"email_address": new_email} def do_change_stream_description(stream: Stream, new_description: str) -> None: stream.description = new_description stream.rendered_description = render_stream_description(new_description) stream.save(update_fields=['description', 'rendered_description']) event = dict( type='stream', op='update', property='description', name=stream.name, stream_id=stream.id, value=new_description, rendered_description=stream.rendered_description, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_change_stream_message_retention_days(stream: Stream, message_retention_days: Optional[int]=None) -> None: stream.message_retention_days = message_retention_days stream.save(update_fields=['message_retention_days']) event = dict( op="update", type="stream", property="message_retention_days", value=message_retention_days, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_create_realm(string_id: str, name: str, emails_restricted_to_domains: Optional[bool]=None) -> Realm: if Realm.objects.filter(string_id=string_id).exists(): raise AssertionError(f"Realm {string_id} already exists!") if not server_initialized(): logging.info("Server not yet initialized. Creating the internal realm first.") create_internal_realm() kwargs: Dict[str, Any] = {} if emails_restricted_to_domains is not None: kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains realm = Realm(string_id=string_id, name=name, **kwargs) realm.save() # Create stream once Realm object has been saved notifications_stream = ensure_stream( realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME, stream_description="Everyone is added to this stream by default. Welcome! :octopus:", acting_user=None) realm.notifications_stream = notifications_stream # With the current initial streams situation, the only public # stream is the notifications_stream. 
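# Editor-added note (sketch; assumes DefaultStream rows mark the streams new users are
# subscribed to automatically, as elsewhere in this codebase): registering the
# notifications stream below is what makes every new account join it on signup. The
# signup_notifications_stream created next is invite-only and is deliberately not
# registered as a default stream.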
DefaultStream.objects.create(stream=notifications_stream, realm=realm) signup_notifications_stream = ensure_stream( realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True, stream_description="A private stream for core team members.", acting_user=None) realm.signup_notifications_stream = signup_notifications_stream realm.save(update_fields=['notifications_stream', 'signup_notifications_stream']) if settings.BILLING_ENABLED: do_change_plan_type(realm, Realm.LIMITED) # Log the event log_event({"type": "realm_created", "string_id": string_id, "emails_restricted_to_domains": emails_restricted_to_domains}) sender = get_system_bot(settings.NOTIFICATION_BOT) admin_realm = sender.realm # Send a notification to the admin realm with override_language(admin_realm.default_language): signup_message = _("Signups enabled") try: signups_stream = get_signups_stream(admin_realm) topic = realm.display_subdomain internal_send_stream_message( admin_realm, sender, signups_stream, topic, signup_message, ) except Stream.DoesNotExist: # nocoverage # If the signups stream hasn't been created in the admin # realm, don't auto-create it to send to it; just do nothing. pass return realm def do_change_notification_settings(user_profile: UserProfile, name: str, value: Union[bool, int, str], log: bool=True) -> None: """Takes in a UserProfile object, the name of a global notification preference to update, and the value to update to """ notification_setting_type = UserProfile.notification_setting_types[name] assert isinstance(value, notification_setting_type), ( f'Cannot update {name}: {value} is not an instance of {notification_setting_type}') setattr(user_profile, name, value) # Disabling digest emails should clear a user's email queue if name == 'enable_digest_emails' and not value: clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST) user_profile.save(update_fields=[name]) event = {'type': 'update_global_notifications', 'user': user_profile.email, 'notification_name': name, 'setting': value} if log: log_event(event) send_event(user_profile.realm, event, [user_profile.id]) def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None: user_profile.enter_sends = enter_sends user_profile.save(update_fields=["enter_sends"]) def do_set_user_display_setting(user_profile: UserProfile, setting_name: str, setting_value: Union[bool, str, int]) -> None: property_type = UserProfile.property_types[setting_name] assert isinstance(setting_value, property_type) setattr(user_profile, setting_name, setting_value) user_profile.save(update_fields=[setting_name]) event = {'type': 'update_display_settings', 'user': user_profile.email, 'setting_name': setting_name, 'setting': setting_value} if setting_name == "default_language": assert isinstance(setting_value, str) event['language_name'] = get_language_name(setting_value) send_event(user_profile.realm, event, [user_profile.id]) # Updates to the timezone display setting are sent to all users if setting_name == "timezone": payload = dict(email=user_profile.email, user_id=user_profile.id, timezone=user_profile.timezone) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def lookup_default_stream_groups(default_stream_group_names: List[str], realm: Realm) -> List[DefaultStreamGroup]: default_stream_groups = [] for group_name in default_stream_group_names: try: default_stream_group = DefaultStreamGroup.objects.get( name=group_name, realm=realm) except DefaultStreamGroup.DoesNotExist: raise 
JsonableError(_('Invalid default stream group {}').format(group_name)) default_stream_groups.append(default_stream_group) return default_stream_groups def notify_default_streams(realm: Realm) -> None: event = dict( type="default_streams", default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)), ) send_event(realm, event, active_non_guest_user_ids(realm.id)) def notify_default_stream_groups(realm: Realm) -> None: event = dict( type="default_stream_groups", default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm)), ) send_event(realm, event, active_non_guest_user_ids(realm.id)) def do_add_default_stream(stream: Stream) -> None: realm_id = stream.realm_id stream_id = stream.id if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists(): DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id) notify_default_streams(stream.realm) def do_remove_default_stream(stream: Stream) -> None: realm_id = stream.realm_id stream_id = stream.id DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete() notify_default_streams(stream.realm) def do_create_default_stream_group(realm: Realm, group_name: str, description: str, streams: List[Stream]) -> None: default_streams = get_default_streams_for_realm(realm.id) for stream in streams: if stream in default_streams: raise JsonableError(_( "'{stream_name}' is a default stream and cannot be added to '{group_name}'", ).format(stream_name=stream.name, group_name=group_name)) check_default_stream_group_name(group_name) (group, created) = DefaultStreamGroup.objects.get_or_create( name=group_name, realm=realm, description=description) if not created: raise JsonableError(_( "Default stream group '{group_name}' already exists", ).format(group_name=group_name)) group.streams.set(streams) notify_default_stream_groups(realm) def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup, streams: List[Stream]) -> None: default_streams = get_default_streams_for_realm(realm.id) for stream in streams: if stream in default_streams: raise JsonableError(_( "'{stream_name}' is a default stream and cannot be added to '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) if stream in group.streams.all(): raise JsonableError(_( "Stream '{stream_name}' is already present in default stream group '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) group.streams.add(stream) group.save() notify_default_stream_groups(realm) def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup, streams: List[Stream]) -> None: for stream in streams: if stream not in group.streams.all(): raise JsonableError(_( "Stream '{stream_name}' is not present in default stream group '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) group.streams.remove(stream) group.save() notify_default_stream_groups(realm) def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup, new_group_name: str) -> None: if group.name == new_group_name: raise JsonableError(_("This default stream group is already named '{}'").format(new_group_name)) if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists(): raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name)) group.name = new_group_name group.save() notify_default_stream_groups(realm) def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup, 
new_description: str) -> None: group.description = new_description group.save() notify_default_stream_groups(realm) def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None: group.delete() notify_default_stream_groups(realm) def get_default_streams_for_realm(realm_id: int) -> List[Stream]: return [default.stream for default in DefaultStream.objects.select_related().filter(realm_id=realm_id)] def get_default_subs(user_profile: UserProfile) -> List[Stream]: # Right now default streams are realm-wide. This wrapper gives us flexibility # to some day further customize how we set up default streams for new users. return get_default_streams_for_realm(user_profile.realm_id) # returns default streams in json serializeable format def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]: return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"]) def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]: return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"]) def do_update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None: effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH # This code isn't perfect, because with various races we might end # up creating two overlapping intervals, but that shouldn't happen # often, and can be corrected for in post-processing try: last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0] # There are two ways our intervals could overlap: # (1) The start of the new interval could be inside the old interval # (2) The end of the new interval could be inside the old interval # In either case, we just extend the old interval to include the new interval. if ((log_time <= last.end and log_time >= last.start) or (effective_end <= last.end and effective_end >= last.start)): last.end = max(last.end, effective_end) last.start = min(last.start, log_time) last.save(update_fields=["start", "end"]) return except IndexError: pass # Otherwise, the intervals don't overlap, so we should make a new one UserActivityInterval.objects.create(user_profile=user_profile, start=log_time, end=effective_end) @statsd_increment('user_activity') def do_update_user_activity(user_profile_id: int, client_id: int, query: str, count: int, log_time: datetime.datetime) -> None: (activity, created) = UserActivity.objects.get_or_create( user_profile_id = user_profile_id, client_id = client_id, query = query, defaults={'last_visit': log_time, 'count': count}) if not created: activity.count += count activity.last_visit = log_time activity.save(update_fields=["last_visit", "count"]) def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None: presence_dict = presence.to_dict() event = dict(type="presence", email=user_profile.email, user_id=user_profile.id, server_timestamp=time.time(), presence={presence_dict['client']: presence_dict}) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def consolidate_client(client: Client) -> Client: # The web app reports a client as 'website' # The desktop app reports a client as ZulipDesktop # due to it setting a custom user agent. 
We want both # to count as web users # Alias ZulipDesktop to website if client.name in ['ZulipDesktop']: return get_client('website') else: return client @statsd_increment('user_presence') def do_update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int) -> None: client = consolidate_client(client) defaults = dict( timestamp=log_time, status=status, realm_id=user_profile.realm_id, ) (presence, created) = UserPresence.objects.get_or_create( user_profile = user_profile, client = client, defaults = defaults, ) stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10) was_idle = presence.status == UserPresence.IDLE became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle) # If an object was created, it has already been saved. # # We suppress changes from ACTIVE to IDLE before stale_status is reached; # this protects us from the user having two clients open: one active, the # other idle. Without this check, we would constantly toggle their status # between the two states. if not created and stale_status or was_idle or status == presence.status: # The following block attempts to only update the "status" # field in the event that it actually changed. This is # important to avoid flushing the UserPresence cache when the # data it would return to a client hasn't actually changed # (see the UserPresence post_save hook for details). presence.timestamp = log_time update_fields = ["timestamp"] if presence.status != status: presence.status = status update_fields.append("status") presence.save(update_fields=update_fields) if not user_profile.realm.presence_disabled and (created or became_online): # Push event to all users in the realm so they see the new user # appear in the presence list immediately, or the newly online # user without delay. Note that we won't send an update here for a # timestamp update, because we rely on the browser to ping us every 50 # seconds for realm-wide status updates, and those updates should have # recent timestamps, which means the browser won't think active users # have gone idle. If we were more aggressive in this function about # sending timestamp updates, we could eliminate the ping responses, but # that's not a high priority for now, considering that most of our non-MIT # realms are pretty small. 
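# Editor-added sketch (hypothetical values, not part of the original module):
# send_presence_changed (defined above) fans out an event shaped roughly like
#
#     {'type': 'presence', 'user_id': 10, 'email': 'hamlet@example.com',
#      'server_timestamp': 1595000000.0,
#      'presence': {'website': {...presence.to_dict()...}}}
#
# to every active user in the realm.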
send_presence_changed(user_profile, presence) def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None: event = {'user_profile_id': user_profile.id, 'time': datetime_to_timestamp(log_time)} queue_json_publish("user_activity_interval", event) def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int, new_user_input: bool) -> None: event = {'user_profile_id': user_profile.id, 'status': status, 'time': datetime_to_timestamp(log_time), 'client': client.name} queue_json_publish("user_presence", event) if new_user_input: update_user_activity_interval(user_profile, log_time) def do_update_user_status(user_profile: UserProfile, away: Optional[bool], status_text: Optional[str], client_id: int) -> None: if away: status = UserStatus.AWAY else: status = UserStatus.NORMAL realm = user_profile.realm update_user_status( user_profile_id=user_profile.id, status=status, status_text=status_text, client_id=client_id, ) event = dict( type='user_status', user_id=user_profile.id, ) if away is not None: event['away'] = away if status_text is not None: event['status_text'] = status_text send_event(realm, event, active_user_ids(realm.id)) def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int: log_statsd_event('bankruptcy') # First, we clear mobile push notifications. This is safer in the # event that the below logic times out and we're killed. all_push_message_ids = UserMessage.objects.filter( user_profile=user_profile, ).extra( where=[UserMessage.where_active_push_notification()], ).values_list("message_id", flat=True)[0:10000] do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids) msgs = UserMessage.objects.filter( user_profile=user_profile, ).extra( where=[UserMessage.where_unread()], ) count = msgs.update( flags=F('flags').bitor(UserMessage.flags.read), ) event = dict( type='update_message_flags', operation='add', flag='read', messages=[], # we don't send messages, since the client reloads anyway all=True, ) event_time = timezone_now() send_event(user_profile.realm, event, [user_profile.id]) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count def do_mark_stream_messages_as_read(user_profile: UserProfile, client: Client, stream: Stream, topic_name: Optional[str]=None) -> int: log_statsd_event('mark_stream_as_read') msgs = UserMessage.objects.filter( user_profile=user_profile, ) recipient = stream.recipient msgs = msgs.filter(message__recipient=recipient) if topic_name: msgs = filter_by_topic_name_via_message( query=msgs, topic_name=topic_name, ) msgs = msgs.extra( where=[UserMessage.where_unread()], ) message_ids = list(msgs.values_list('message__id', flat=True)) count = msgs.update( flags=F('flags').bitor(UserMessage.flags.read), ) event = dict( type='update_message_flags', operation='add', flag='read', messages=message_ids, all=False, ) event_time = timezone_now() send_event(user_profile.realm, event, [user_profile.id]) do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count def 
do_update_mobile_push_notification(message: Message, prior_mention_user_ids: Set[int], stream_push_user_ids: Set[int]) -> None: # Called during the message edit code path to remove mobile push # notifications for users who are no longer mentioned following # the edit. See #15428 for details. # # A perfect implementation would also support updating the message # in a sent notification if a message was edited to mention a # group rather than a user (or vise versa), though it is likely # not worth the effort to do such a change. if not message.is_stream_message(): return remove_notify_users = prior_mention_user_ids - message.mentions_user_ids - stream_push_user_ids do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id]) def do_clear_mobile_push_notifications_for_ids(user_profile_ids: List[int], message_ids: List[int]) -> None: if len(message_ids) == 0: return # This function supports clearing notifications for several users # only for the message-edit use case where we'll have a single message_id. assert len(user_profile_ids) == 1 or len(message_ids) == 1 messages_by_user = defaultdict(list) notifications_to_update = list(UserMessage.objects.filter( message_id__in=message_ids, user_profile_id__in=user_profile_ids, ).extra( where=[UserMessage.where_active_push_notification()], ).values_list('user_profile_id', 'message_id')) for (user_id, message_id) in notifications_to_update: messages_by_user[user_id].append(message_id) for (user_profile_id, event_message_ids) in messages_by_user.items(): queue_json_publish("missedmessage_mobile_notifications", { "type": "remove", "user_profile_id": user_profile_id, "message_ids": event_message_ids, }) def do_update_message_flags(user_profile: UserProfile, client: Client, operation: str, flag: str, messages: List[int]) -> int: valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS] if flag not in valid_flags: raise JsonableError(_("Invalid flag: '{}'").format(flag)) if flag in UserMessage.NON_EDITABLE_FLAGS: raise JsonableError(_("Flag not editable: '{}'").format(flag)) flagattr = getattr(UserMessage.flags, flag) msgs = UserMessage.objects.filter(user_profile=user_profile, message__id__in=messages) # This next block allows you to star any message, even those you # didn't receive (e.g. because you're looking at a public stream # you're not subscribed to, etc.). The problem is that starring # is a flag boolean on UserMessage, and UserMessage rows are # normally created only when you receive a message to support # searching your personal history. So we need to create one. We # add UserMessage.flags.historical, so that features that need # "messages you actually received" can exclude these UserMessages. if msgs.count() == 0: if not len(messages) == 1: raise JsonableError(_("Invalid message(s)")) if flag != "starred": raise JsonableError(_("Invalid message(s)")) # Validate that the user could have read the relevant message message = access_message(user_profile, messages[0])[0] # OK, this is a message that you legitimately have access # to via narrowing to the stream it is on, even though you # didn't actually receive it. So we create a historical, # read UserMessage message row for you to star. 
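# Editor-added note (sketch, not part of the original module): the historical row is
# created with flags = historical | read; the requested flag itself ("starred" is the
# only one allowed on this path) is then applied by the bulk msgs.update(...) below,
# which re-evaluates the queryset and now matches the freshly created row.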
UserMessage.objects.create(user_profile=user_profile, message=message, flags=UserMessage.flags.historical | UserMessage.flags.read) if operation == 'add': count = msgs.update(flags=F('flags').bitor(flagattr)) elif operation == 'remove': count = msgs.update(flags=F('flags').bitand(~flagattr)) else: raise AssertionError("Invalid message flags operation") event = {'type': 'update_message_flags', 'operation': operation, 'flag': flag, 'messages': messages, 'all': False} send_event(user_profile.realm, event, [user_profile.id]) if flag == "read" and operation == "add": event_time = timezone_now() do_clear_mobile_push_notifications_for_ids([user_profile.id], messages) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count class MessageUpdateUserInfoResult(TypedDict): message_user_ids: Set[int] mention_user_ids: Set[int] def notify_topic_moved_streams(user_profile: UserProfile, old_stream: Stream, old_topic: str, new_stream: Stream, new_topic: Optional[str], send_notification_to_old_thread: bool, send_notification_to_new_thread: bool) -> None: # Since moving content between streams is highly disruptive, # it's worth adding a couple tombstone messages showing what # happened. sender = get_system_bot(settings.NOTIFICATION_BOT) if new_topic is None: new_topic = old_topic user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**" old_topic_link = f"#**{old_stream.name}>{old_topic}**" new_topic_link = f"#**{new_stream.name}>{new_topic}**" if send_notification_to_new_thread: with override_language(new_stream.realm.default_language): internal_send_stream_message( new_stream.realm, sender, new_stream, new_topic, _("This topic was moved here from {old_location} by {user}").format( old_location=old_topic_link, user=user_mention, ), ) if send_notification_to_old_thread: with override_language(old_stream.realm.default_language): # Send a notification to the old stream that the topic was moved. internal_send_stream_message( old_stream.realm, sender, old_stream, old_topic, _("This topic was moved by {user} to {new_location}").format( user=user_mention, new_location=new_topic_link, ), ) def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult: # We exclude UserMessage.flags.historical rows since those # users did not receive the message originally, and thus # probably are not relevant for reprocessed alert_words, # mentions and similar rendering features. This may be a # decision we change in the future. 
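# Editor-added sketch (hypothetical flag values, not part of the original module): the
# mask below picks out rows whose flags include either mention bit, e.g.
#
#     mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
#     int(row['flags']) & mask != 0   # => this user was mentioned, directly or via a
#                                     #    wildcard mention
#
# so mention_user_ids ends up as the subset of message_user_ids that had a
# mention-related flag set.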
query = UserMessage.objects.filter( message=message_id, flags=~UserMessage.flags.historical, ).values('user_profile_id', 'flags') rows = list(query) message_user_ids = { row['user_profile_id'] for row in rows } mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned mention_user_ids = { row['user_profile_id'] for row in rows if int(row['flags']) & mask } return dict( message_user_ids=message_user_ids, mention_user_ids=mention_user_ids, ) def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None: wildcard = message.mentions_wildcard mentioned_ids = message.mentions_user_ids ids_with_alert_words = message.user_ids_with_alert_words changed_ums: Set[UserMessage] = set() def update_flag(um: UserMessage, should_set: bool, flag: int) -> None: if should_set: if not (um.flags & flag): um.flags |= flag changed_ums.add(um) else: if (um.flags & flag): um.flags &= ~flag changed_ums.add(um) for um in ums: has_alert_word = um.user_profile_id in ids_with_alert_words update_flag(um, has_alert_word, UserMessage.flags.has_alert_word) mentioned = um.user_profile_id in mentioned_ids update_flag(um, mentioned, UserMessage.flags.mentioned) update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned) for um in changed_ums: um.save(update_fields=['flags']) def update_to_dict_cache(changed_messages: List[Message], realm_id: Optional[int]=None) -> List[int]: """Updates the message as stored in the to_dict cache (for serving messages).""" items_for_remote_cache = {} message_ids = [] changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id) for msg_id, msg in changed_messages_to_dict.items(): message_ids.append(msg_id) key = to_dict_cache_key_id(msg_id) items_for_remote_cache[key] = (msg,) cache_set_many(items_for_remote_cache) return message_ids # We use transaction.atomic to support select_for_update in the attachment codepath. @transaction.atomic def do_update_embedded_data(user_profile: UserProfile, message: Message, content: Optional[str], rendered_content: Optional[str]) -> None: event: Dict[str, Any] = { 'type': 'update_message', 'sender': user_profile.email, 'message_id': message.id} changed_messages = [message] ums = UserMessage.objects.filter(message=message.id) if content is not None: update_user_message_flags(message, ums) message.content = content message.rendered_content = rendered_content message.rendered_content_version = markdown_version event["content"] = content event["rendered_content"] = rendered_content message.save(update_fields=["content", "rendered_content"]) event['message_ids'] = update_to_dict_cache(changed_messages) def user_info(um: UserMessage) -> Dict[str, Any]: return { 'id': um.user_profile_id, 'flags': um.flags_list(), } send_event(user_profile.realm, event, list(map(user_info, ums))) class DeleteMessagesEvent(TypedDict, total=False): type: str message_ids: List[int] message_type: str sender_id: int recipient_id: int topic: str stream_id: int # We use transaction.atomic to support select_for_update in the attachment codepath. @transaction.atomic def do_update_message(user_profile: UserProfile, message: Message, new_stream: Optional[Stream], topic_name: Optional[str], propagate_mode: str, send_notification_to_old_thread: bool, send_notification_to_new_thread: bool, content: Optional[str], rendered_content: Optional[str], prior_mention_user_ids: Set[int], mention_user_ids: Set[int], mention_data: Optional[MentionData]=None) -> int: """ The main function for message editing. 
A message edit event can modify: * the message's content (in which case the caller will have set both content and rendered_content), * the topic, in which case the caller will have set topic_name * or both With topic edits, propagate_mode determines whether other message also have their topics edited. """ timestamp = timezone_now() message.last_edit_time = timestamp event: Dict[str, Any] = { 'type': 'update_message', 'user_id': user_profile.id, 'edit_timestamp': datetime_to_timestamp(timestamp), 'message_id': message.id, } edit_history_event: Dict[str, Any] = { 'user_id': user_profile.id, 'timestamp': event['edit_timestamp'], } changed_messages = [message] stream_being_edited = None if message.is_stream_message(): stream_id = message.recipient.type_id stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm) event['stream_name'] = stream_being_edited.name ums = UserMessage.objects.filter(message=message.id) if content is not None: assert rendered_content is not None # mention_data is required if there's a content edit. assert mention_data is not None # add data from group mentions to mentions_user_ids. for group_id in message.mentions_user_group_ids: members = mention_data.get_group_members(group_id) message.mentions_user_ids.update(members) update_user_message_flags(message, ums) # One could imagine checking realm.allow_edit_history here and # modifying the events based on that setting, but doing so # doesn't really make sense. We need to send the edit event # to clients regardless, and a client already had access to # the original/pre-edit content of the message anyway. That # setting must be enforced on the client side, and making a # change here simply complicates the logic for clients parsing # edit history events. event['orig_content'] = message.content event['orig_rendered_content'] = message.rendered_content edit_history_event["prev_content"] = message.content edit_history_event["prev_rendered_content"] = message.rendered_content edit_history_event["prev_rendered_content_version"] = message.rendered_content_version message.content = content message.rendered_content = rendered_content message.rendered_content_version = markdown_version event["content"] = content event["rendered_content"] = rendered_content event['prev_rendered_content_version'] = message.rendered_content_version event['is_me_message'] = Message.is_status_message(content, rendered_content) # message.has_image and message.has_link will have been # already updated by markdown rendering in the caller. 
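# Editor-added note (summary sketch, not part of the original module): the block below
# recomputes the notification audiences for the edited message -- push, email, and
# wildcard-mention user-id lists from get_recipient_info are attached to the
# update_message event so clients and the notification pipeline can re-evaluate who
# should be alerted after the content change, and stale mobile pushes for users no
# longer mentioned are cleared.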
message.has_attachment = check_attachment_reference_change(message) if message.is_stream_message(): if topic_name is not None: new_topic_name = topic_name else: new_topic_name = message.topic_name() stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget( stream_id=stream_id, topic_name=new_topic_name, ) else: stream_topic = None info = get_recipient_info( recipient=message.recipient, sender_id=message.sender_id, stream_topic=stream_topic, possible_wildcard_mention=mention_data.message_has_wildcards(), ) event['push_notify_user_ids'] = list(info['push_notify_user_ids']) event['stream_push_user_ids'] = list(info['stream_push_user_ids']) event['stream_email_user_ids'] = list(info['stream_email_user_ids']) event['prior_mention_user_ids'] = list(prior_mention_user_ids) event['mention_user_ids'] = list(mention_user_ids) event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids']) if message.mentions_wildcard: event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids']) else: event['wildcard_mention_user_ids'] = [] do_update_mobile_push_notification(message, prior_mention_user_ids, info['stream_push_user_ids']) if topic_name is not None or new_stream is not None: orig_topic_name = message.topic_name() event["propagate_mode"] = propagate_mode event["stream_id"] = message.recipient.type_id if new_stream is not None: assert content is None assert message.is_stream_message() assert stream_being_edited is not None edit_history_event['prev_stream'] = stream_being_edited.id event[ORIG_TOPIC] = orig_topic_name message.recipient_id = new_stream.recipient_id event["new_stream_id"] = new_stream.id event["propagate_mode"] = propagate_mode # When messages are moved from one stream to another, some # users may lose access to those messages, including guest # users and users not subscribed to the new stream (if it is a # private stream). For those users, their experience is as # though the messages were deleted, and we should send a # delete_message event to them instead. subscribers = get_active_subscriptions_for_stream_id( stream_id).select_related("user_profile") subs_to_new_stream = list(get_active_subscriptions_for_stream_id( new_stream.id).select_related("user_profile")) new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream] # Get users who aren't subscribed to the new_stream. subs_losing_usermessages = [ sub for sub in subscribers if sub.user_profile_id not in new_stream_sub_ids ] # Users who can longer access the message without some action # from administrators. # # TODO: Extend this list to also contain users losing access # due to the messages moving to a private stream they are not # subscribed to. subs_losing_access = [ sub for sub in subs_losing_usermessages if sub.user_profile.is_guest ] ums = ums.exclude(user_profile_id__in=[ sub.user_profile_id for sub in subs_losing_usermessages]) if topic_name is not None: topic_name = truncate_topic(topic_name) message.set_topic_name(topic_name) # These fields have legacy field names. 
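# Editor-added note (assumption: in this codebase these constants have historically
# mapped to legacy wire names, e.g. TOPIC_NAME -> "subject", ORIG_TOPIC ->
# "orig_subject", LEGACY_PREV_TOPIC -> "prev_subject"): the event keys set below
# therefore intentionally keep the old field names rather than the newer "topic"
# terminology used elsewhere.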
event[ORIG_TOPIC] = orig_topic_name event[TOPIC_NAME] = topic_name event[TOPIC_LINKS] = topic_links(message.sender.realm_id, topic_name) edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name delete_event_notify_user_ids: List[int] = [] if propagate_mode in ["change_later", "change_all"]: assert topic_name is not None or new_stream is not None messages_list = update_messages_for_topic_edit( message=message, propagate_mode=propagate_mode, orig_topic_name=orig_topic_name, topic_name=topic_name, new_stream=new_stream, ) changed_messages += messages_list if new_stream is not None: assert stream_being_edited is not None message_ids = [msg.id for msg in changed_messages] # Delete UserMessage objects for users who will no # longer have access to these messages. Note: This could be # very expensive, since it's N guest users x M messages. UserMessage.objects.filter( user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages], message_id__in=message_ids, ).delete() delete_event: DeleteMessagesEvent = { 'type': 'delete_message', 'message_ids': message_ids, 'message_type': 'stream', 'stream_id': stream_being_edited.id, 'topic': orig_topic_name, } delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access] send_event(user_profile.realm, delete_event, delete_event_notify_user_ids) if message.edit_history is not None: edit_history = ujson.loads(message.edit_history) edit_history.insert(0, edit_history_event) else: edit_history = [edit_history_event] message.edit_history = ujson.dumps(edit_history) # This does message.save(update_fields=[...]) save_message_for_edit_use_case(message=message) realm_id: Optional[int] = None if stream_being_edited is not None: realm_id = stream_being_edited.realm_id event['message_ids'] = update_to_dict_cache(changed_messages, realm_id) def user_info(um: UserMessage) -> Dict[str, Any]: return { 'id': um.user_profile_id, 'flags': um.flags_list(), } # The following blocks arranges that users who are subscribed to a # stream and can see history from before they subscribed get # live-update when old messages are edited (e.g. if the user does # a topic edit themself). # # We still don't send an update event to users who are not # subscribed to this stream and don't have a UserMessage row. This # means if a non-subscriber is viewing the narrow, they won't get # a real-time updates. This is a balance between sending # message-edit notifications for every public stream to every user # in the organization (too expansive, and also not what we do for # newly sent messages anyway) and having magical live-updates # where possible. users_to_be_notified = list(map(user_info, ums)) if stream_being_edited is not None: if stream_being_edited.is_history_public_to_subscribers: subscribers = get_active_subscriptions_for_stream_id(stream_id) # We exclude long-term idle users, since they by # definition have no active clients. subscribers = subscribers.exclude(user_profile__long_term_idle=True) # Remove duplicates by excluding the id of users already # in users_to_be_notified list. 
This is the case where a # user both has a UserMessage row and is a current # Subscriber subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums]) if new_stream is not None: assert delete_event_notify_user_ids is not None subscribers = subscribers.exclude(user_profile_id__in=delete_event_notify_user_ids) # All users that are subscribed to the stream must be # notified when a message is edited subscriber_ids = [user.user_profile_id for user in subscribers] if new_stream is not None: # TODO: Guest users don't see the new moved topic # unless breadcrumb message for new stream is # enabled. Excluding these users from receiving this # event helps us avoid an error traceback for our # clients. We should figure out a way to inform the # guest users of this new topic if sending a 'message' # event for these messages is not an option. # # Don't send this event to guest subs who are not # subscribers of the old stream but are subscribed to # the new stream; clients will be confused. old_stream_unsubbed_guests = [ sub for sub in subs_to_new_stream if sub.user_profile.is_guest and sub.user_profile_id not in subscriber_ids ] subscribers = subscribers.exclude(user_profile_id__in=[ sub.user_profile_id for sub in old_stream_unsubbed_guests]) subscriber_ids = [user.user_profile_id for user in subscribers] users_to_be_notified += list(map(subscriber_info, subscriber_ids)) send_event(user_profile.realm, event, users_to_be_notified) if (len(changed_messages) > 0 and new_stream is not None and stream_being_edited is not None): # Notify users that the topic was moved. notify_topic_moved_streams(user_profile, stream_being_edited, orig_topic_name, new_stream, topic_name, send_notification_to_old_thread, send_notification_to_new_thread) return len(changed_messages) def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None: # The messages in a delete_message event all belong to the same topic, # or the event is for a single private message, as no other behaviour is # possible with the current callers of this method. messages = list(messages) message_ids = [message.id for message in messages] if not message_ids: return event: DeleteMessagesEvent = { 'type': 'delete_message', 'message_ids': message_ids, } sample_message = messages[0] message_type = "stream" users_to_notify = [] if not sample_message.is_stream_message(): assert len(messages) == 1 message_type = "private" ums = UserMessage.objects.filter(message_id__in=message_ids) users_to_notify = [um.user_profile_id for um in ums] # TODO: We should plan to remove `sender_id` here. event['recipient_id'] = sample_message.recipient_id event['sender_id'] = sample_message.sender_id archiving_chunk_size = retention.MESSAGE_BATCH_SIZE if message_type == "stream": stream_id = sample_message.recipient.type_id event['stream_id'] = stream_id event['topic'] = sample_message.topic_name() subscribers = get_active_subscriptions_for_stream_id(stream_id) # We exclude long-term idle users, since they by definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True) subscriber_ids = [user.user_profile_id for user in subscribers] users_to_notify = list(map(subscriber_info, subscriber_ids)) archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size) event['message_type'] = message_type send_event(realm, event, users_to_notify) def do_delete_messages_by_sender(user: UserProfile) -> None: message_ids = list(Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id')) if message_ids: move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE) def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]: stat = COUNT_STATS['messages_in_stream:is_bot:day'] traffic_from = timezone_now() - datetime.timedelta(days=28) query = StreamCount.objects.filter(property=stat.property, end_time__gt=traffic_from) query = query.filter(stream_id__in=stream_ids) traffic_list = query.values('stream_id').annotate(value=Sum('value')) traffic_dict = {} for traffic in traffic_list: traffic_dict[traffic["stream_id"]] = traffic["value"] return traffic_dict def round_to_2_significant_digits(number: int) -> int: return int(round(number, 2 - len(str(number)))) STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7 def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime, recent_traffic: Dict[int, int]) -> Optional[int]: try: stream_traffic = recent_traffic[stream_id] except KeyError: stream_traffic = 0 stream_age = (timezone_now() - stream_date_created).days if stream_age >= 28: average_weekly_traffic = int(stream_traffic // 4) elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS: average_weekly_traffic = int(stream_traffic * 7 // stream_age) else: return None if average_weekly_traffic == 0 and stream_traffic > 0: average_weekly_traffic = 1 return round_to_2_significant_digits(average_weekly_traffic) SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]] def get_web_public_subs(realm: Realm) -> SubHelperT: color_idx = 0 def get_next_color() -> str: nonlocal color_idx color = STREAM_ASSIGNMENT_COLORS[color_idx] color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS) return color subscribed = [] for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False): stream_dict = stream.to_dict() # Add versions of the Subscription fields based on a simulated # new user subscription set. stream_dict['is_muted'] = False stream_dict['color'] = get_next_color() stream_dict['desktop_notifications'] = True stream_dict['audible_notifications'] = True stream_dict['push_notifications'] = True stream_dict['email_notifications'] = True stream_dict['pin_to_top'] = False stream_weekly_traffic = get_average_weekly_stream_traffic(stream.id, stream.date_created, {}) stream_dict['stream_weekly_traffic'] = stream_weekly_traffic stream_dict['email_address'] = '' subscribed.append(stream_dict) return (subscribed, [], []) # In general, it's better to avoid using .values() because it makes # the code pretty ugly, but in this case, it has significant # performance impact for loading / for users with large numbers of # subscriptions, so it's worth optimizing. 
def gather_subscriptions_helper(user_profile: UserProfile, include_subscribers: bool=True) -> SubHelperT: sub_dicts = get_stream_subscriptions_for_user(user_profile).values( *Subscription.API_FIELDS, "recipient_id").order_by("recipient_id") sub_dicts = list(sub_dicts) sub_recipient_ids = [ sub['recipient_id'] for sub in sub_dicts ] stream_recipient = StreamRecipientMap() stream_recipient.populate_for_recipient_ids(sub_recipient_ids) stream_ids: Set[int] = set() for sub in sub_dicts: sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id']) stream_ids.add(sub['stream_id']) recent_traffic = get_streams_traffic(stream_ids=stream_ids) all_streams = get_active_streams(user_profile.realm).select_related( "realm").values( *Stream.API_FIELDS, # date_created is used as an input for the stream_weekly_traffic computed field. "date_created", # The realm_id and recipient_id are generally not needed in the API. "realm_id", "recipient_id", # email_token isn't public to some users with access to # the stream, so doesn't belong in API_FIELDS. "email_token") stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids] stream_hash = {} for stream in stream_dicts: stream_hash[stream["id"]] = stream all_streams_id = [stream["id"] for stream in all_streams] subscribed = [] unsubscribed = [] never_subscribed = [] # Deactivated streams aren't in stream_hash. streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts if sub["stream_id"] in stream_hash] streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts} # Add never subscribed streams to streams_subscribed_map streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams}) if include_subscribers: subscriber_map: Mapping[int, Optional[List[int]]] = bulk_get_subscriber_user_ids( all_streams, user_profile, streams_subscribed_map, stream_recipient, ) else: # If we're not including subscribers, always return None, # which the below code needs to check for anyway. subscriber_map = defaultdict(lambda: None) sub_unsub_stream_ids = set() for sub in sub_dicts: sub_unsub_stream_ids.add(sub["stream_id"]) stream = stream_hash.get(sub["stream_id"]) if not stream: # This stream has been deactivated, don't include it. continue # We first construct a dictionary based on the standard Stream # and Subscription models' API_FIELDS. stream_dict = {} for field_name in Stream.API_FIELDS: if field_name == "id": stream_dict['stream_id'] = stream["id"] continue stream_dict[field_name] = stream[field_name] # Copy Subscription.API_FIELDS except for "active", which is # used to determine where to put the field. for field_name in Subscription.API_FIELDS: stream_dict[field_name] = sub[field_name] # Backwards-compatibility for clients that haven't been # updated for the in_home_view => is_muted API migration. stream_dict['in_home_view'] = not stream_dict['is_muted'] # Backwards-compatibility for clients that haven't been # updated for the is_announcement_only -> stream_post_policy # migration. stream_dict['is_announcement_only'] = \ stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS # Add a few computed fields not directly from the data models.
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream["id"], stream["date_created"], recent_traffic) stream_dict['email_address'] = encode_email_address_helper( stream["name"], stream["email_token"], show_sender=True) # Construct and add subscribers data subscribers: Optional[List[int]] = subscriber_map[stream["id"]] # Important: don't show the subscribers if the stream is invite only # and this user isn't on it anymore (or a realm administrator). if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin): subscribers = None # Guest users lose access to subscribers when they are unsubscribed. if not sub["active"] and user_profile.is_guest: subscribers = None if subscribers is not None: stream_dict['subscribers'] = subscribers # is_active is represented in this structure by which list we include it in. is_active = stream_dict.pop("active") if is_active: subscribed.append(stream_dict) else: unsubscribed.append(stream_dict) all_streams_id_set = set(all_streams_id) if user_profile.can_access_public_streams(): never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids else: never_subscribed_stream_ids = set() never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams if ns_stream_dict['id'] in never_subscribed_stream_ids] for stream in never_subscribed_streams: is_public = (not stream['invite_only']) if is_public or user_profile.is_realm_admin: stream_dict = {} for field_name in Stream.API_FIELDS: if field_name == "id": stream_dict['stream_id'] = stream["id"] continue stream_dict[field_name] = stream[field_name] stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream["id"], stream["date_created"], recent_traffic) # Backwards-compatibility addition of removed field. stream_dict['is_announcement_only'] = \ stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS if is_public or user_profile.is_realm_admin: subscribers = subscriber_map[stream["id"]] if subscribers is not None: stream_dict['subscribers'] = subscribers never_subscribed.append(stream_dict) return (sorted(subscribed, key=lambda x: x['name']), sorted(unsubscribed, key=lambda x: x['name']), sorted(never_subscribed, key=lambda x: x['name'])) def gather_subscriptions( user_profile: UserProfile, include_subscribers: bool=False, ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]: subscribed, unsubscribed, _ = gather_subscriptions_helper( user_profile, include_subscribers=include_subscribers) if include_subscribers: user_ids = set() for subs in [subscribed, unsubscribed]: for sub in subs: if 'subscribers' in sub: for subscriber in sub['subscribers']: user_ids.add(subscriber) email_dict = get_emails_from_user_ids(list(user_ids)) for subs in [subscribed, unsubscribed]: for sub in subs: if 'subscribers' in sub: sub['subscribers'] = sorted([ email_dict[user_id] for user_id in sub['subscribers'] ]) return (subscribed, unsubscribed) def get_active_presence_idle_user_ids(realm: Realm, sender_id: int, message_type: str, active_user_ids: Set[int], user_flags: Dict[int, List[str]]) -> List[int]: ''' Given a list of active_user_ids, we build up a subset of those users who fit these criteria: * They are likely to need notifications (either due to mentions, alert words, or being PM'ed). * They are no longer "present" according to the UserPresence table. 
''' if realm.presence_disabled: return [] is_pm = message_type == 'private' user_ids = set() for user_id in active_user_ids: flags: Iterable[str] = user_flags.get(user_id, []) mentioned = 'mentioned' in flags or 'wildcard_mentioned' in flags private_message = is_pm and user_id != sender_id alerted = 'has_alert_word' in flags if mentioned or private_message or alerted: user_ids.add(user_id) return filter_presence_idle_user_ids(user_ids) def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]: # Given a set of user IDs (the recipients of a message), accesses # the UserPresence table to determine which of these users are # currently idle and should potentially get email notifications # (and push notifications with # user_profile.enable_online_push_notifications=False). # # We exclude any presence data from ZulipMobile for the purpose of # triggering these notifications; the mobile app can more # effectively do its own client-side filtering of notification # sounds/etc. for the case that the user is actively doing a PM # conversation in the app. if not user_ids: return [] # Matches presence.js constant OFFLINE_THRESHOLD_SECS = 140 recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS) rows = UserPresence.objects.filter( user_profile_id__in=user_ids, status=UserPresence.ACTIVE, timestamp__gte=recent, ).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id') active_user_ids = {row['user_profile_id'] for row in rows} idle_user_ids = user_ids - active_user_ids return sorted(list(idle_user_ids)) def do_send_confirmation_email(invitee: PreregistrationUser, referrer: UserProfile) -> str: """ Send the confirmation/welcome e-mail to an invited user. """ activation_url = create_confirmation_link(invitee, Confirmation.INVITATION) context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.delivery_email, 'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name} from_name = f"{referrer.full_name} (via Zulip)" send_email('zerver/emails/invitation', to_emails=[invitee.email], from_name=from_name, from_address=FromAddress.tokenized_no_reply_address(), language=referrer.realm.default_language, context=context, realm=referrer.realm) return activation_url def email_not_system_bot(email: str) -> None: if is_cross_realm_bot_email(email): msg = email_reserved_for_system_bots_error(email) code = msg raise ValidationError( msg, code=code, params=dict(deactivated=False), ) class InvitationError(JsonableError): code = ErrorCode.INVITATION_FAILED data_fields = ['errors', 'sent_invitations'] def __init__(self, msg: str, errors: List[Tuple[str, str, bool]], sent_invitations: bool) -> None: self._msg: str = msg self.errors: List[Tuple[str, str, bool]] = errors self.sent_invitations: bool = sent_invitations def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int: '''An upper bound on the number of invites sent in the last `days` days''' recent_invites = RealmCount.objects.filter( realm__in=realms, property='invites_sent::day', end_time__gte=timezone_now() - datetime.timedelta(days=days), ).aggregate(Sum('value'))['value__sum'] if recent_invites is None: return 0 return recent_invites def check_invite_limit(realm: Realm, num_invitees: int) -> None: '''Discourage using invitation emails as a vector for carrying spam.''' msg = _("You do not have enough remaining invites. " "Please contact {email} to have your limit raised. 
" "No invitations were sent.").format(email=settings.ZULIP_ADMINISTRATOR) if not settings.OPEN_REALM_CREATION: return recent_invites = estimate_recent_invites([realm], days=1) if num_invitees + recent_invites > realm.max_invites: raise InvitationError(msg, [], sent_invitations=False) default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS) if realm.date_created <= timezone_now() - newrealm_age: # If this isn't a "newly-created" realm, we're done. The # remaining code applies an aggregate limit across all # "new" realms, to address sudden bursts of spam realms. return if realm.max_invites > default_max: # If a user is on a realm where we've bumped up # max_invites, then we exempt them from invite limits. return new_realms = Realm.objects.filter( date_created__gte=timezone_now() - newrealm_age, _max_invites__lte=default_max, ).all() for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS: recent_invites = estimate_recent_invites(new_realms, days=days) if num_invitees + recent_invites > count: raise InvitationError(msg, [], sent_invitations=False) def do_invite_users(user_profile: UserProfile, invitee_emails: SizedTextIterable, streams: Iterable[Stream], invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> None: check_invite_limit(user_profile.realm, len(invitee_emails)) realm = user_profile.realm if not realm.invite_required: # Inhibit joining an open realm to send spam invitations. min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS) if (user_profile.date_joined > timezone_now() - min_age and not user_profile.is_realm_admin): raise InvitationError( _("Your account is too new to send invites for this organization. " "Ask an organization admin, or a more experienced user."), [], sent_invitations=False) good_emails: Set[str] = set() errors: List[Tuple[str, str, bool]] = [] validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm) for email in invitee_emails: if email == '': continue email_error = validate_email_is_valid( email, validate_email_allowed_in_realm, ) if email_error: errors.append((email, email_error, False)) else: good_emails.add(email) ''' good_emails are emails that look ok so far, but we still need to make sure they're not gonna conflict with existing users ''' error_dict = get_existing_user_errors(user_profile.realm, good_emails) skipped: List[Tuple[str, str, bool]] = [] for email in error_dict: msg, deactivated = error_dict[email] skipped.append((email, msg, deactivated)) good_emails.remove(email) validated_emails = list(good_emails) if errors: raise InvitationError( _("Some emails did not validate, so we didn't send any invitations."), errors + skipped, sent_invitations=False) if skipped and len(skipped) == len(invitee_emails): # All e-mails were skipped, so we didn't actually invite anyone. raise InvitationError(_("We weren't able to invite anyone."), skipped, sent_invitations=False) # We do this here rather than in the invite queue processor since this # is used for rate limiting invitations, rather than keeping track of # when exactly invitations were sent do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'], None, timezone_now(), increment=len(validated_emails)) # Now that we are past all the possible errors, we actually create # the PreregistrationUser objects and trigger the email invitations. for email in validated_emails: # The logged in user is the referrer. 
prereg_user = PreregistrationUser(email=email, referred_by=user_profile, invited_as=invite_as, realm=user_profile.realm) prereg_user.save() stream_ids = [stream.id for stream in streams] prereg_user.streams.set(stream_ids) event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id} queue_json_publish("invites", event) if skipped: raise InvitationError(_("Some of those addresses are already using Zulip, " "so we didn't send them an invitation. We did send " "invitations to everyone else!"), skipped, sent_invitations=True) notify_invites_changed(user_profile) def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]: if user_profile.is_realm_admin: prereg_users = filter_to_valid_prereg_users( PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm) ) else: prereg_users = filter_to_valid_prereg_users( PreregistrationUser.objects.filter(referred_by=user_profile) ) invites = [] for invitee in prereg_users: invites.append(dict(email=invitee.email, invited_by_user_id=invitee.referred_by.id, invited=datetime_to_timestamp(invitee.invited_at), id=invitee.id, invited_as=invitee.invited_as, is_multiuse=False)) if not user_profile.is_realm_admin: # We do not return multiuse invites to non-admin users. return invites lowest_datetime = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS) multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE, date_sent__gte=lowest_datetime) for confirmation_obj in multiuse_confirmation_objs: invite = confirmation_obj.content_object invites.append(dict(invited_by_user_id=invite.referred_by.id, invited=datetime_to_timestamp(confirmation_obj.date_sent), id=invite.id, link_url=confirmation_url(confirmation_obj.confirmation_key, user_profile.realm, Confirmation.MULTIUSE_INVITE), invited_as=invite.invited_as, is_multiuse=True)) return invites def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int, streams: Sequence[Stream] = []) -> str: realm = referred_by.realm invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by) if streams: invite.streams.set(streams) invite.invited_as = invited_as invite.save() notify_invites_changed(referred_by) return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE) def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None: email = prereg_user.email # Delete both the confirmation objects and the prereg_user object. # TODO: Probably we actually want to set the confirmation objects # to a "revoked" status so that we can give the invited user a better # error message. content_type = ContentType.objects.get_for_model(PreregistrationUser) Confirmation.objects.filter(content_type=content_type, object_id=prereg_user.id).delete() prereg_user.delete() clear_scheduled_invitation_emails(email) notify_invites_changed(prereg_user) def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None: content_type = ContentType.objects.get_for_model(MultiuseInvite) Confirmation.objects.filter(content_type=content_type, object_id=multiuse_invite.id).delete() multiuse_invite.delete() notify_invites_changed(multiuse_invite.referred_by) def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int: # These two assertions are guaranteed to hold for the caller's code path.
assert prereg_user.referred_by is not None assert prereg_user.realm is not None check_invite_limit(prereg_user.referred_by.realm, 1) prereg_user.invited_at = timezone_now() prereg_user.save() do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'], None, prereg_user.invited_at) clear_scheduled_invitation_emails(prereg_user.email) # We don't store the custom email body, so just set it to None event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None} queue_json_publish("invites", event) return datetime_to_timestamp(prereg_user.invited_at) def notify_realm_emoji(realm: Realm) -> None: event = dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji()) send_event(realm, event, active_user_ids(realm.id)) def check_add_realm_emoji(realm: Realm, name: str, author: UserProfile, image_file: File) -> Optional[RealmEmoji]: realm_emoji = RealmEmoji(realm=realm, name=name, author=author) realm_emoji.full_clean() realm_emoji.save() emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id) # The only user-controlled portion of 'emoji_file_name' is an extension, # which can not contain '..' or '/' or '\', making it difficult to exploit emoji_file_name = mark_sanitized(emoji_file_name) emoji_uploaded_successfully = False try: upload_emoji_image(image_file, emoji_file_name, author) emoji_uploaded_successfully = True finally: if not emoji_uploaded_successfully: realm_emoji.delete() return None else: realm_emoji.file_name = emoji_file_name realm_emoji.save(update_fields=['file_name']) notify_realm_emoji(realm_emoji.realm) return realm_emoji def do_remove_realm_emoji(realm: Realm, name: str) -> None: emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False) emoji.deactivated = True emoji.save(update_fields=['deactivated']) notify_realm_emoji(realm) def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None: event = dict(type="alert_words", alert_words=words) send_event(user_profile.realm, event, [user_profile.id]) def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None: words = add_user_alert_words(user_profile, alert_words) notify_alert_words(user_profile, words) def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None: words = remove_user_alert_words(user_profile, alert_words) notify_alert_words(user_profile, words) def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str, date_muted: Optional[datetime.datetime]=None) -> None: if date_muted is None: date_muted = timezone_now() add_topic_mute(user_profile, stream.id, recipient.id, topic, date_muted) event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None: remove_topic_mute(user_profile, stream.id, topic) event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None: UserHotspot.objects.get_or_create(user=user, hotspot=hotspot) event = dict(type="hotspots", hotspots=get_next_hotspots(user)) send_event(user.realm, event, [user.id]) def notify_realm_filters(realm: Realm) -> None: realm_filters = realm_filters_for_realm(realm.id) event = dict(type="realm_filters", realm_filters=realm_filters) send_event(realm, event, active_user_ids(realm.id)) # 
NOTE: Regexes must be simple enough that they can be easily translated to JavaScript # RegExp syntax. In addition to JS-compatible syntax, the following features are available: # * Named groups will be converted to numbered groups automatically # * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int: pattern = pattern.strip() url_format_string = url_format_string.strip() realm_filter = RealmFilter( realm=realm, pattern=pattern, url_format_string=url_format_string) realm_filter.full_clean() realm_filter.save() notify_realm_filters(realm) return realm_filter.id def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None, id: Optional[int]=None) -> None: if pattern is not None: RealmFilter.objects.get(realm=realm, pattern=pattern).delete() else: RealmFilter.objects.get(realm=realm, pk=id).delete() notify_realm_filters(realm) def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]: # We may eventually use memcached to speed this up, but the DB is fast. return UserProfile.emails_from_ids(user_ids) def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> (RealmDomain): realm_domain = RealmDomain.objects.create(realm=realm, domain=domain, allow_subdomains=allow_subdomains) event = dict(type="realm_domains", op="add", realm_domain=dict(domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains)) send_event(realm, event, active_user_ids(realm.id)) return realm_domain def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None: realm_domain.allow_subdomains = allow_subdomains realm_domain.save(update_fields=['allow_subdomains']) event = dict(type="realm_domains", op="change", realm_domain=dict(domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains)) send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id)) def do_remove_realm_domain(realm_domain: RealmDomain, acting_user: Optional[UserProfile]=None) -> None: realm = realm_domain.realm domain = realm_domain.domain realm_domain.delete() if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains: # If this was the last realm domain, we mark the realm as no # longer restricted to domain, because the feature doesn't do # anything if there are no domains, and this is probably less # confusing than the alternative. 
do_set_realm_property(realm, 'emails_restricted_to_domains', False, acting_user=acting_user) event = dict(type="realm_domains", op="remove", domain=domain) send_event(realm, event, active_user_ids(realm.id)) def get_occupied_streams(realm: Realm) -> QuerySet: # TODO: Make a generic stub for QuerySet """ Get streams with subscribers """ exists_expression = Exists( Subscription.objects.filter(active=True, user_profile__is_active=True, user_profile__realm=realm, recipient_id=OuterRef('recipient_id')), ) occupied_streams = Stream.objects.filter(realm=realm, deactivated=False) \ .annotate(occupied=exists_expression).filter(occupied=True) return occupied_streams def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]: query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True) streams = Stream.get_client_data(query) return streams def do_get_streams( user_profile: UserProfile, include_public: bool=True, include_subscribed: bool=True, include_all_active: bool=False, include_default: bool=False, include_owner_subscribed: bool=False, ) -> List[Dict[str, Any]]: if include_all_active and not user_profile.is_api_super_user: raise JsonableError(_("User not authorized for this query")) include_public = include_public and user_profile.can_access_public_streams() # Start out with all streams in the realm with subscribers query = get_occupied_streams(user_profile.realm) if include_all_active: streams = Stream.get_client_data(query) else: # We construct a query as the or (|) of the various sources # this user requested streams from. query_filter: Optional[Q] = None def add_filter_option(option: Q) -> None: nonlocal query_filter if query_filter is None: query_filter = option else: query_filter |= option if include_subscribed: subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile) recipient_check = Q(id__in=set(subscribed_stream_ids)) add_filter_option(recipient_check) if include_public: invite_only_check = Q(invite_only=False) add_filter_option(invite_only_check) if include_owner_subscribed and user_profile.is_bot: bot_owner = user_profile.bot_owner assert bot_owner is not None owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner) owner_subscribed_check = Q(id__in=set(owner_stream_ids)) add_filter_option(owner_subscribed_check) if query_filter is not None: query = query.filter(query_filter) streams = Stream.get_client_data(query) else: # Don't bother going to the database with no valid sources streams = [] streams.sort(key=lambda elt: elt["name"]) if include_default: is_default = {} default_streams = get_default_streams_for_realm(user_profile.realm_id) for default_stream in default_streams: is_default[default_stream.id] = True for stream in streams: stream['is_default'] = is_default.get(stream["stream_id"], False) return streams def notify_attachment_update(user_profile: UserProfile, op: str, attachment_dict: Dict[str, Any]) -> None: event = { 'type': 'attachment', 'op': op, 'attachment': attachment_dict, "upload_space_used": user_profile.realm.currently_used_upload_space_bytes(), } send_event(user_profile.realm, event, [user_profile.id]) def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool: claimed = False for path_id in potential_path_ids: user_profile = message.sender is_message_realm_public = False if message.is_stream_message(): is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public() if not validate_attachment_request(user_profile, path_id): # Technically, there are 2 cases here: # * 
The user put something in their message that has the form # of an upload, but doesn't correspond to a file that actually # exists. validate_attachment_request will return None. # * The user is trying to send a link to a file they don't have permission to # access themselves. validate_attachment_request will return False. # # Either case is unusual and suggests a UI bug that got # the user in this situation, so we log a warning in these cases. logging.warning( "User %s tried to share upload %s in message %s, but lacks permission", user_profile.id, path_id, message.id, ) continue claimed = True attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public) notify_attachment_update(user_profile, "update", attachment.to_dict()) return claimed def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None: old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago) for attachment in old_unclaimed_attachments: delete_message_image(attachment.path_id) attachment.delete() def check_attachment_reference_change(message: Message) -> bool: # For an unsaved message edit (message.* has been updated, but not # saved to the database), adjusts Attachment data to correspond to # the new content. prev_attachments = {a.path_id for a in message.attachment_set.all()} new_attachments = set(message.potential_attachment_path_ids) if new_attachments == prev_attachments: return bool(prev_attachments) to_remove = list(prev_attachments - new_attachments) if len(to_remove) > 0: attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update() message.attachment_set.remove(*attachments_to_update) to_add = list(new_attachments - prev_attachments) if len(to_add) > 0: do_claim_attachments(message, to_add) return message.attachment_set.exists() def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None: fields = custom_profile_fields_for_realm(realm.id) event = dict(type="custom_profile_fields", op=operation, fields=[f.as_dict() for f in fields]) send_event(realm, event, active_user_ids(realm.id)) def try_add_realm_default_custom_profile_field(realm: Realm, field_subtype: str) -> CustomProfileField: field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype] field = CustomProfileField(realm=realm, name=field_data['name'], field_type=CustomProfileField.EXTERNAL_ACCOUNT, hint=field_data['hint'], field_data=ujson.dumps(dict(subtype=field_subtype))) field.save() field.order = field.id field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'add') return field def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int, hint: str='', field_data: Optional[ProfileFieldData]=None) -> CustomProfileField: field = CustomProfileField(realm=realm, name=name, field_type=field_type) field.hint = hint if (field.field_type == CustomProfileField.CHOICE or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT): field.field_data = ujson.dumps(field_data or {}) field.save() field.order = field.id field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'add') return field def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None: """ Deleting a field will also delete the user profile data associated with it in CustomProfileFieldValue model.
""" field.delete() notify_realm_custom_profile_fields(realm, 'delete') def do_remove_realm_custom_profile_fields(realm: Realm) -> None: CustomProfileField.objects.filter(realm=realm).delete() def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField, name: str, hint: str='', field_data: Optional[ProfileFieldData]=None) -> None: field.name = name field.hint = hint if (field.field_type == CustomProfileField.CHOICE or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT): field.field_data = ujson.dumps(field_data or {}) field.save() notify_realm_custom_profile_fields(realm, 'update') def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None: order_mapping = {_[1]: _[0] for _ in enumerate(order)} fields = CustomProfileField.objects.filter(realm=realm) for field in fields: if field.id not in order_mapping: raise JsonableError(_("Invalid order mapping.")) for field in fields: field.order = order_mapping[field.id] field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'update') def notify_user_update_custom_profile_data(user_profile: UserProfile, field: Dict[str, Union[int, str, List[int], None]]) -> None: data = dict(id=field['id']) if field['type'] == CustomProfileField.USER: data["value"] = ujson.dumps(field['value']) else: data['value'] = field['value'] if field['rendered_value']: data['rendered_value'] = field['rendered_value'] payload = dict(user_id=user_profile.id, custom_profile_field=data) event = dict(type="realm_user", op="update", person=payload) send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id)) def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile, data: List[Dict[str, Union[int, str, List[int]]]], ) -> None: with transaction.atomic(): for field in data: field_value, created = CustomProfileFieldValue.objects.get_or_create( user_profile=user_profile, field_id=field['id']) if not created and field_value.value == str(field['value']): # If the field value isn't actually being changed to a different one, # and always_notify is disabled, we have nothing to do here for this field. # Note: field_value.value is a TextField() so we need to cast field['value'] # to a string for the comparison in this if. 
continue field_value.value = field['value'] if field_value.field.is_renderable(): field_value.rendered_value = render_stream_description(str(field['value'])) field_value.save(update_fields=['value', 'rendered_value']) else: field_value.save(update_fields=['value']) notify_user_update_custom_profile_data(user_profile, { "id": field_value.field_id, "value": field_value.value, "rendered_value": field_value.rendered_value, "type": field_value.field.field_type}) def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None: try: field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id) field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile) field_value.delete() notify_user_update_custom_profile_data(user_profile, {'id': field_id, 'value': None, 'rendered_value': None, 'type': field.field_type}) except CustomProfileField.DoesNotExist: raise JsonableError(_('Field id {id} not found.').format(id=field_id)) except CustomProfileFieldValue.DoesNotExist: pass def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None: event = dict(type="user_group", op="add", group=dict(name=user_group.name, members=[member.id for member in members], description=user_group.description, id=user_group.id, ), ) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile], description: str) -> None: try: user_group = create_user_group(name, initial_members, realm, description=description) do_send_create_user_group_event(user_group, initial_members) except django.db.utils.IntegrityError: raise JsonableError(_("User group '{}' already exists.").format(name)) def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None: event = dict(type="user_group", op='update', group_id=user_group.id, data=data) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def do_update_user_group_name(user_group: UserGroup, name: str) -> None: try: user_group.name = name user_group.save(update_fields=['name']) except django.db.utils.IntegrityError: raise JsonableError(_("User group '{}' already exists.").format(name)) do_send_user_group_update_event(user_group, dict(name=name)) def do_update_user_group_description(user_group: UserGroup, description: str) -> None: user_group.description = description user_group.save(update_fields=['description']) do_send_user_group_update_event(user_group, dict(description=description)) def do_update_outgoing_webhook_service(bot_profile: UserProfile, service_interface: int, service_payload_url: str) -> None: # TODO: First service is chosen because currently one bot can only have one service. # Update this once multiple services are supported. 
service = get_bot_services(bot_profile.id)[0] service.base_url = service_payload_url service.interface = service_interface service.save() send_event(bot_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=bot_profile.id, services = [dict(base_url=service.base_url, interface=service.interface, token=service.token)], ), ), bot_owner_user_ids(bot_profile)) def do_update_bot_config_data(bot_profile: UserProfile, config_data: Dict[str, str]) -> None: for key, value in config_data.items(): set_bot_config(bot_profile, key, value) updated_config_data = get_bot_config(bot_profile) send_event(bot_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=bot_profile.id, services = [dict(config_data=updated_config_data)], ), ), bot_owner_user_ids(bot_profile)) def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]: user_profile = get_user_profile_by_id(user_profile_id) services = get_bot_services(user_profile_id) service_dicts: List[Dict[str, Any]] = [] if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: service_dicts = [{'base_url': service.base_url, 'interface': service.interface, 'token': service.token, } for service in services] elif user_profile.bot_type == UserProfile.EMBEDDED_BOT: try: service_dicts = [{'config_data': get_bot_config(user_profile), 'service_name': services[0].name, }] # A ConfigError just means that there are no config entries for user_profile. except ConfigError: pass return service_dicts def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]], realm: Realm) -> Dict[int, List[Dict[str, Any]]]: bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts] bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list) for service in Service.objects.filter(user_profile_id__in=bot_profile_ids): bot_services_by_uid[service.user_profile_id].append(service) embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT] embedded_bot_configs = get_bot_configs(embedded_bot_ids) service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {} for bot_dict in bot_dicts: bot_profile_id = bot_dict["id"] bot_type = bot_dict["bot_type"] services = bot_services_by_uid[bot_profile_id] service_dicts: List[Dict[str, Any]] = [] if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: service_dicts = [{'base_url': service.base_url, 'interface': service.interface, 'token': service.token, } for service in services] elif bot_type == UserProfile.EMBEDDED_BOT: if bot_profile_id in embedded_bot_configs.keys(): bot_config = embedded_bot_configs[bot_profile_id] service_dicts = [{'config_data': bot_config, 'service_name': services[0].name, }] service_dicts_by_uid[bot_profile_id] = service_dicts return service_dicts_by_uid def get_owned_bot_dicts(user_profile: UserProfile, include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]: if user_profile.is_realm_admin and include_all_realm_bots_if_admin: result = get_bot_dicts_in_realm(user_profile.realm) else: result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True, bot_owner=user_profile).values(*bot_dict_fields) services_by_ids = get_service_dicts_for_bots(result, user_profile.realm) return [{'email': botdict['email'], 'user_id': botdict['id'], 'full_name': botdict['full_name'], 'bot_type': botdict['bot_type'], 'is_active': botdict['is_active'], 'api_key': botdict['api_key'], 'default_sending_stream': botdict['default_sending_stream__name'], 'default_events_register_stream': botdict['default_events_register_stream__name'], 
'default_all_public_streams': botdict['default_all_public_streams'], 'owner_id': botdict['bot_owner__id'], 'avatar_url': avatar_url_from_dict(botdict), 'services': services_by_ids[botdict['id']], } for botdict in result] def do_send_user_group_members_update_event(event_name: str, user_group: UserGroup, user_ids: List[int]) -> None: event = dict(type="user_group", op=event_name, group_id=user_group.id, user_ids=user_ids) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def bulk_add_members_to_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None: memberships = [UserGroupMembership(user_group_id=user_group.id, user_profile=user_profile) for user_profile in user_profiles] UserGroupMembership.objects.bulk_create(memberships) user_ids = [up.id for up in user_profiles] do_send_user_group_members_update_event('add_members', user_group, user_ids) def remove_members_from_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None: UserGroupMembership.objects.filter( user_group_id=user_group.id, user_profile__in=user_profiles).delete() user_ids = [up.id for up in user_profiles] do_send_user_group_members_update_event('remove_members', user_group, user_ids) def do_send_delete_user_group_event(realm: Realm, user_group_id: int, realm_id: int) -> None: event = dict(type="user_group", op="remove", group_id=user_group_id) send_event(realm, event, active_user_ids(realm_id)) def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None: user_group = access_user_group_by_id(user_group_id, user_profile) user_group.delete() do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id) def do_send_realm_reactivation_email(realm: Realm) -> None: url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION) context = {'confirmation_url': url, 'realm_uri': realm.uri, 'realm_name': realm.name} language = realm.default_language send_email_to_admins( 'zerver/emails/realm_reactivation', realm, from_address=FromAddress.tokenized_no_reply_address(), from_name=FromAddress.security_email_from_name(language=language), language=language, context=context) def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None: user.zoom_token = token user.save(update_fields=["zoom_token"]) send_event( user.realm, dict(type="has_zoom_token", value=token is not None), [user.id], ) def notify_realm_export(user_profile: UserProfile) -> None: # In the future, we may want to send this event to all realm admins. event = dict(type='realm_export', exports=get_realm_exports_serialized(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None: # Give mypy a hint so it knows `ujson.loads` # isn't being passed an `Optional[str]`. export_extra_data = export.extra_data assert export_extra_data is not None export_data = ujson.loads(export_extra_data) export_path = export_data.get('export_path') if export_path: # Allow removal even if the export failed. 
delete_export_tarball(export_path) export_data.update({'deleted_timestamp': timezone_now().timestamp()}) export.extra_data = ujson.dumps(export_data) export.save(update_fields=['extra_data']) notify_realm_export(user_profile) def get_topic_messages(user_profile: UserProfile, stream: Stream, topic_name: str) -> List[Message]: query = UserMessage.objects.filter( user_profile=user_profile, message__recipient=stream.recipient, ).order_by("id") return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
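The stream-traffic arithmetic above (get_average_weekly_stream_traffic together with round_to_2_significant_digits) is easy to sanity-check by hand. A minimal, self-contained sketch: the helper body is copied from this file, while the sample numbers are hypothetical.

def round_to_2_significant_digits(number: int) -> int:
    # round() with a negative second argument rounds to tens, hundreds, etc.,
    # so 2 - len(str(number)) keeps exactly the two leading digits.
    return int(round(number, 2 - len(str(number))))

# A 10-day-old stream with 30 messages of recent (28-day) traffic:
# 30 * 7 // 10 == 21 messages/week, which already has two significant digits.
assert round_to_2_significant_digits(30 * 7 // 10) == 21

# A stream at least 28 days old with 4567 recent messages:
# 4567 // 4 == 1141 messages/week, which rounds to 1100.
assert round_to_2_significant_digits(4567 // 4) == 1100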
def _internal_prep_message(realm: Realm, sender: UserProfile, addressee: Addressee, content: str) -> Optional[Dict[str, Any]]: """ Creates a message object and checks it, but doesn't send it or save it to the database. The internal function that calls this can therefore batch send a bunch of created messages together as one database query. Call do_send_messages with a list of the return values of this method. """ # Truncate the content if it exceeds the maximum message length. if len(content) > MAX_MESSAGE_LENGTH: content = content[0:3900] + "\n\n[message was too long and has been truncated]" # If we have a stream name, and the stream doesn't exist, we # create it here (though this code path should probably be removed # eventually, moving that responsibility to the caller). If # addressee.stream_name() is None (i.e. we're sending to a stream # by ID), we skip this, as the stream object must already exist. if addressee.is_stream(): stream_name = addressee.stream_name() if stream_name is not None: ensure_stream(realm, stream_name, acting_user=sender) try: return check_message(sender, get_client("Internal"), addressee, content, realm=realm) except JsonableError as e: logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg) return None
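To show how the return value is meant to be consumed, here is a hedged sketch of the batching pattern the docstring describes; the wrapper name and its requests parameter are hypothetical, while _internal_prep_message and do_send_messages are the functions from this codebase.

def _internal_prep_and_send_batch(realm: Realm, sender: UserProfile,
                                  requests: List[Tuple[Addressee, str]]) -> List[int]:
    # Prep every message without saving it, drop any that failed
    # validation (returned None), then hand the survivors to
    # do_send_messages in a single call, as the docstring suggests;
    # do_send_messages returns the IDs of the sent messages.
    prepped = [_internal_prep_message(realm, sender, addressee, content)
               for addressee, content in requests]
    return do_send_messages([m for m in prepped if m is not None])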
2,385
2,415
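One more standalone illustration: filter_presence_idle_user_ids above hinges on OFFLINE_THRESHOLD_SECS = 140 (matching presence.js). A minimal sketch of just the cutoff arithmetic; the is_idle helper is hypothetical, and only the threshold and the timestamp >= now - threshold comparison mirror the real query.

import datetime

OFFLINE_THRESHOLD_SECS = 140  # matches presence.js

def is_idle(last_active: datetime.datetime, now: datetime.datetime) -> bool:
    # The real code counts a user as active when a qualifying
    # UserPresence row has timestamp >= now - threshold; idle users
    # are the complement of that set.
    return last_active < now - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)

now = datetime.datetime(2020, 7, 1, 12, 0, 0)
assert not is_idle(now - datetime.timedelta(seconds=60), now)   # still active
assert is_idle(now - datetime.timedelta(seconds=300), now)      # idle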
import datetime import itertools import logging import os import platform import time from collections import defaultdict from operator import itemgetter from typing import ( AbstractSet, Any, Callable, Dict, Iterable, List, Mapping, MutableMapping, Optional, Sequence, Set, Tuple, Union, ) import django.db.utils import ujson from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ValidationError from django.core.files import File from django.db import IntegrityError, connection, transaction from django.db.models import Count, Exists, F, Max, OuterRef, Q, Sum from django.db.models.query import QuerySet from django.utils.html import escape from django.utils.timezone import now as timezone_now from django.utils.translation import override as override_language from django.utils.translation import ugettext as _ from psycopg2.extras import execute_values from psycopg2.sql import SQL from typing_extensions import TypedDict from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat from analytics.models import StreamCount from confirmation import settings as confirmation_settings from confirmation.models import ( Confirmation, confirmation_url, create_confirmation_link, generate_key, ) from zerver.decorator import statsd_increment from zerver.lib import retention as retention from zerver.lib.addressee import Addressee from zerver.lib.alert_words import ( add_user_alert_words, get_alert_word_automaton, remove_user_alert_words, ) from zerver.lib.avatar import avatar_url, avatar_url_from_dict from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config from zerver.lib.bulk_create import bulk_create_users from zerver.lib.cache import ( bot_dict_fields, cache_delete, cache_delete_many, cache_set, cache_set_many, cache_with_key, delete_user_profile_caches, display_recipient_cache_key, flush_user_profile, to_dict_cache_key_id, user_profile_by_api_key_cache_key, user_profile_by_email_cache_key, ) from zerver.lib.context_managers import lockfile from zerver.lib.create_user import create_user, get_display_email_address from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper from zerver.lib.email_notifications import enqueue_welcome_emails from zerver.lib.email_validation import ( email_reserved_for_system_bots_error, get_existing_user_errors, get_realm_email_validator, validate_email_is_valid, ) from zerver.lib.emoji import get_emoji_file_name from zerver.lib.exceptions import ( ErrorCode, JsonableError, MarkdownRenderingException, StreamDoesNotExistError, StreamWithIDDoesNotExistError, ) from zerver.lib.export import get_realm_exports_serialized from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS from zerver.lib.hotspots import get_next_hotspots from zerver.lib.i18n import get_language_name from zerver.lib.markdown import MentionData, topic_links from zerver.lib.markdown import version as markdown_version from zerver.lib.message import ( MessageDict, access_message, render_markdown, truncate_body, truncate_topic, update_first_visible_message_id, ) from zerver.lib.pysa import mark_sanitized from zerver.lib.queue import queue_json_publish from zerver.lib.realm_icon import realm_icon_url from zerver.lib.realm_logo import get_realm_logo_data from zerver.lib.retention import move_messages_to_archive from zerver.lib.send_email import ( FromAddress, clear_scheduled_emails, clear_scheduled_invitation_emails, send_email, 
send_email_to_admins, ) from zerver.lib.server_initialization import create_internal_realm, server_initialized from zerver.lib.sessions import delete_user_sessions from zerver.lib.storage import static_path from zerver.lib.stream_recipient import StreamRecipientMap from zerver.lib.stream_subscription import ( get_active_subscriptions_for_stream_id, get_active_subscriptions_for_stream_ids, get_bulk_stream_subscriber_info, get_stream_subscriptions_for_user, get_stream_subscriptions_for_users, get_subscribed_stream_ids_for_user, num_subscribers_for_stream_id, ) from zerver.lib.stream_topic import StreamTopicTarget from zerver.lib.streams import ( access_stream_for_send_message, check_stream_name, create_stream_if_needed, get_default_value_for_history_public_to_subscribers, render_stream_description, send_stream_creation_event, subscribed_to_stream, ) from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime from zerver.lib.topic import ( LEGACY_PREV_TOPIC, ORIG_TOPIC, TOPIC_LINKS, TOPIC_NAME, filter_by_exact_message_topic, filter_by_topic_name_via_message, save_message_for_edit_use_case, update_messages_for_topic_edit, ) from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute from zerver.lib.types import ProfileFieldData from zerver.lib.upload import ( claim_attachment, delete_avatar_image, delete_export_tarball, delete_message_image, upload_emoji_image, ) from zerver.lib.user_groups import access_user_group_by_id, create_user_group from zerver.lib.user_status import update_user_status from zerver.lib.users import ( check_bot_name_available, check_full_name, format_user_row, get_api_key, user_profile_to_user_row, ) from zerver.lib.utils import generate_api_key, log_statsd_event from zerver.lib.validator import check_widget_content from zerver.lib.widget import do_widget_post_save_actions from zerver.models import ( MAX_MESSAGE_LENGTH, Attachment, Client, CustomProfileField, CustomProfileFieldValue, DefaultStream, DefaultStreamGroup, EmailChangeStatus, Message, MultiuseInvite, PreregistrationUser, Reaction, Realm, RealmAuditLog, RealmDomain, RealmEmoji, RealmFilter, Recipient, ScheduledEmail, ScheduledMessage, Service, Stream, SubMessage, Subscription, UserActivity, UserActivityInterval, UserGroup, UserGroupMembership, UserHotspot, UserMessage, UserPresence, UserProfile, UserStatus, active_non_guest_user_ids, active_user_ids, custom_profile_fields_for_realm, filter_to_valid_prereg_users, get_active_streams, get_bot_dicts_in_realm, get_bot_services, get_client, get_default_stream_groups, get_huddle_recipient, get_huddle_user_ids, get_old_unclaimed_attachments, get_stream, get_stream_by_id_in_realm, get_stream_cache_key, get_system_bot, get_user_by_delivery_email, get_user_by_id_in_realm_including_cross_realm, get_user_profile_by_id, is_cross_realm_bot_email, query_for_ids, realm_filters_for_realm, stream_name_in_use, validate_attachment_request, ) from zerver.tornado.event_queue import send_event if settings.BILLING_ENABLED: from corporate.lib.stripe import downgrade_now, update_license_ledger_if_needed # This will be used to type annotate parameters in a function if the function # works on both str and unicode in python 2 but in python 3 it only works on str. 
SizedTextIterable = Union[Sequence[str], AbstractSet[str]] ONBOARDING_TOTAL_MESSAGES = 1000 ONBOARDING_UNREAD_MESSAGES = 20 STREAM_ASSIGNMENT_COLORS = [ "#76ce90", "#fae589", "#a6c7e5", "#e79ab5", "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5", "#f5ce6e", "#c2726a", "#94c849", "#bd86e5", "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063", "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4", "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"] def subscriber_info(user_id: int) -> Dict[str, Any]: return { 'id': user_id, 'flags': ['read'] } # Store an event in the log for re-importing messages def log_event(event: MutableMapping[str, Any]) -> None: if settings.EVENT_LOG_DIR is None: return if "timestamp" not in event: event["timestamp"] = time.time() if not os.path.exists(settings.EVENT_LOG_DIR): os.mkdir(settings.EVENT_LOG_DIR) template = os.path.join(settings.EVENT_LOG_DIR, '%s.' + platform.node() + timezone_now().strftime('.%Y-%m-%d')) with lockfile(template % ('lock',)): with open(template % ('events',), 'a') as log: log.write(ujson.dumps(event) + '\n') def can_access_stream_user_ids(stream: Stream) -> Set[int]: # return user ids of users who can access the attributes of # a stream, such as its name/description. if stream.is_public(): # For a public stream, this is everyone in the realm # except unsubscribed guest users return public_stream_user_ids(stream) else: # for a private stream, it's subscribers plus realm admins. return private_stream_user_ids( stream.id) | {user.id for user in stream.realm.get_admin_users_and_bots()} def private_stream_user_ids(stream_id: int) -> Set[int]: # TODO: Find similar queries elsewhere and de-duplicate this code. subscriptions = get_active_subscriptions_for_stream_id(stream_id) return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')} def public_stream_user_ids(stream: Stream) -> Set[int]: guest_subscriptions = get_active_subscriptions_for_stream_id( stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST) guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')} return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]: is_private_bot = ( user_profile.default_sending_stream and user_profile.default_sending_stream.invite_only or user_profile.default_events_register_stream and user_profile.default_events_register_stream.invite_only) if is_private_bot: return {user_profile.bot_owner_id} else: users = {user.id for user in user_profile.realm.get_human_admin_users()} users.add(user_profile.bot_owner_id) return users def realm_user_count(realm: Realm) -> int: return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count() def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]: human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0, UserProfile.ROLE_REALM_OWNER: 0, UserProfile.ROLE_MEMBER: 0, UserProfile.ROLE_GUEST: 0} for value_dict in list(UserProfile.objects.filter( realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role'))): human_counts[value_dict['role']] = value_dict['role__count'] bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count() return { RealmAuditLog.ROLE_COUNT_HUMANS: human_counts, RealmAuditLog.ROLE_COUNT_BOTS: bot_count, } def get_signups_stream(realm: Realm) -> Stream: # This one-liner helps us work around a lint rule. 
return get_stream("signups", realm) def notify_new_user(user_profile: UserProfile) -> None: sender_email = settings.NOTIFICATION_BOT sender = get_system_bot(sender_email) user_count = realm_user_count(user_profile.realm) signup_notifications_stream = user_profile.realm.get_signup_notifications_stream() # Send notification to realm signup notifications stream if it exists # Don't send notification for the first user in a realm if signup_notifications_stream is not None and user_count > 1: with override_language(user_profile.realm.default_language): message = _("{user} just signed up for Zulip. (total: {user_count})").format( user=f"@_**{user_profile.full_name}|{user_profile.id}**", user_count=user_count ) internal_send_stream_message( user_profile.realm, sender, signup_notifications_stream, _("signups"), message ) # We also send a notification to the Zulip administrative realm admin_realm = sender.realm try: # Check whether the stream exists signups_stream = get_signups_stream(admin_realm) with override_language(admin_realm.default_language): # We intentionally use the same strings as above to avoid translation burden. message = _("{user} just signed up for Zulip. (total: {user_count})").format( user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count ) internal_send_stream_message( admin_realm, sender, signups_stream, user_profile.realm.display_subdomain, message ) except Stream.DoesNotExist: # If the signups stream hasn't been created in the admin # realm, don't auto-create it to send to it; just do nothing. pass def notify_invites_changed(user_profile: UserProfile) -> None: event = dict(type="invites_changed") admin_ids = [user.id for user in user_profile.realm.get_admin_users_and_bots()] send_event(user_profile.realm, event, admin_ids) def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None: """Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public streams, so you have something to look at in your home view once you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES are marked unread. """ one_week_ago = timezone_now() - datetime.timedelta(weeks=1) recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only] recent_messages = Message.objects.filter(recipient_id__in=recipient_ids, date_sent__gt=one_week_ago).order_by("-id") message_ids_to_use = list(reversed(recent_messages.values_list( 'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES])) if len(message_ids_to_use) == 0: return # Handle the race condition where a message arrives between # bulk_add_subscriptions above and the Message query just above already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use, user_profile=user_profile).values_list("message_id", flat=True)) # Mark the newest ONBOARDING_UNREAD_MESSAGES as unread. 
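    # Illustrative walk-through: with the constants above
    # (ONBOARDING_TOTAL_MESSAGES=1000, ONBOARDING_UNREAD_MESSAGES=20), a user
    # joining a busy realm gets up to 1000 recent messages, and the
    # newest-first loop below leaves the 20 newest unread (flags=0) while
    # marking everything older as read.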
marked_unread = 0 ums_to_create = [] for message_id in reversed(message_ids_to_use): if message_id in already_ids: continue um = UserMessage(user_profile=user_profile, message_id=message_id) if marked_unread < ONBOARDING_UNREAD_MESSAGES: marked_unread += 1 else: um.flags = UserMessage.flags.read ums_to_create.append(um) UserMessage.objects.bulk_create(reversed(ums_to_create)) # Does the processing for a new user account: # * Subscribes to default/invitation streams # * Fills in some recent historical messages # * Notifies other users in realm and Zulip about the signup # * Deactivates PreregistrationUser objects # * subscribe the user to newsletter if newsletter_data is specified def process_new_human_user(user_profile: UserProfile, prereg_user: Optional[PreregistrationUser]=None, newsletter_data: Optional[Mapping[str, str]]=None, default_stream_groups: Sequence[DefaultStreamGroup]=[], realm_creation: bool=False) -> None: mit_beta_user = user_profile.realm.is_zephyr_mirror_realm if prereg_user is not None: prereg_user.status = confirmation_settings.STATUS_ACTIVE prereg_user.save(update_fields=['status']) streams = prereg_user.streams.all() acting_user: Optional[UserProfile] = prereg_user.referred_by else: streams = [] acting_user = None # If the user's invitation didn't explicitly list some streams, we # add the default streams if len(streams) == 0: streams = get_default_subs(user_profile) for default_stream_group in default_stream_groups: default_stream_group_streams = default_stream_group.streams.all() for stream in default_stream_group_streams: if stream not in streams: streams.append(stream) bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user) add_new_user_history(user_profile, streams) # mit_beta_users don't have a referred_by field if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None: # This is a cross-realm private message. with override_language(prereg_user.referred_by.default_language): internal_send_private_message( user_profile.realm, get_system_bot(settings.NOTIFICATION_BOT), prereg_user.referred_by, _("{user} accepted your invitation to join Zulip!").format(user=f"{user_profile.full_name} <`{user_profile.email}`>") ) # Mark any other PreregistrationUsers that are STATUS_ACTIVE as # inactive so we can keep track of the PreregistrationUser we # actually used for analytics if prereg_user is not None: PreregistrationUser.objects.filter( email__iexact=user_profile.delivery_email).exclude(id=prereg_user.id)\ .update(status=confirmation_settings.STATUS_REVOKED) if prereg_user.referred_by is not None: notify_invites_changed(user_profile) else: PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email)\ .update(status=confirmation_settings.STATUS_REVOKED) notify_new_user(user_profile) # Clear any scheduled invitation emails to prevent them # from being sent after the user is created. clear_scheduled_invitation_emails(user_profile.delivery_email) if user_profile.realm.send_welcome_emails: enqueue_welcome_emails(user_profile, realm_creation) # We have an import loop here; it's intentional, because we want # to keep all the onboarding code in zerver/lib/onboarding.py. 
from zerver.lib.onboarding import send_initial_pms send_initial_pms(user_profile) if newsletter_data is not None: # If the user was created automatically via the API, we may # not want to register them for the newsletter queue_json_publish( "signups", { 'email_address': user_profile.delivery_email, 'user_id': user_profile.id, 'merge_fields': { 'NAME': user_profile.full_name, 'REALM_ID': user_profile.realm_id, 'OPTIN_IP': newsletter_data["IP"], 'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)), }, }, lambda event: None) def notify_created_user(user_profile: UserProfile) -> None: user_row = user_profile_to_user_row(user_profile) person = format_user_row(user_profile.realm, user_profile, user_row, # Since we don't know what the client # supports at this point in the code, we # just assume client_gravatar and # user_avatar_url_field_optional = False :( client_gravatar=False, user_avatar_url_field_optional=False, # We assume there's no custom profile # field data for a new user; initial # values are expected to be added in a # later event. custom_profile_field_data={}) event: Dict[str, Any] = dict(type="realm_user", op="add", person=person) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]: def stream_name(stream: Optional[Stream]) -> Optional[str]: if not stream: return None return stream.name default_sending_stream_name = stream_name(user_profile.default_sending_stream) default_events_register_stream_name = stream_name(user_profile.default_events_register_stream) bot = dict(email=user_profile.email, user_id=user_profile.id, full_name=user_profile.full_name, bot_type=user_profile.bot_type, is_active=user_profile.is_active, api_key=get_api_key(user_profile), default_sending_stream=default_sending_stream_name, default_events_register_stream=default_events_register_stream_name, default_all_public_streams=user_profile.default_all_public_streams, avatar_url=avatar_url(user_profile), services = get_service_dicts_for_bot(user_profile.id), ) # Set the owner key only when the bot has an owner. # The default bots don't have an owner. So don't # set the owner key while reactivating them. 
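    # Sketch of the resulting event (field values illustrative):
    #   {'type': 'realm_bot', 'op': 'add',
    #    'bot': {'email': ..., 'user_id': ..., 'full_name': ...,
    #            'owner_id': <added below only when the bot has an owner>}}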
if user_profile.bot_owner is not None: bot['owner_id'] = user_profile.bot_owner.id return dict(type="realm_bot", op="add", bot=bot) def notify_created_bot(user_profile: UserProfile) -> None: event = created_bot_event(user_profile) send_event(user_profile.realm, event, bot_owner_user_ids(user_profile)) def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None: user_set = set() for full_name, email in name_list: user_set.add((email, full_name, True)) bulk_create_users(realm, user_set, bot_type) def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str, bot_type: Optional[int]=None, role: Optional[int]=None, bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None, timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR, default_sending_stream: Optional[Stream]=None, default_events_register_stream: Optional[Stream]=None, default_all_public_streams: Optional[bool]=None, prereg_user: Optional[PreregistrationUser]=None, newsletter_data: Optional[Dict[str, str]]=None, default_stream_groups: Sequence[DefaultStreamGroup]=[], source_profile: Optional[UserProfile]=None, realm_creation: bool=False, acting_user: Optional[UserProfile]=None) -> UserProfile: user_profile = create_user(email=email, password=password, realm=realm, full_name=full_name, role=role, bot_type=bot_type, bot_owner=bot_owner, tos_version=tos_version, timezone=timezone, avatar_source=avatar_source, default_sending_stream=default_sending_stream, default_events_register_stream=default_events_register_stream, default_all_public_streams=default_all_public_streams, source_profile=source_profile) event_time = user_profile.date_joined if not acting_user: acting_user = user_profile RealmAuditLog.objects.create( realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_CREATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) # Note that for bots, the caller will send an additional event # with bot-specific info like services. 
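    # Hypothetical usage sketch (arguments invented for illustration):
    #   do_create_user('iago@example.com', 'secret', realm, 'Iago')
    # creates the UserProfile, records the USER_CREATED audit-log row with
    # per-role counts as above, bumps the daily active-user stat, and then
    # fans out the realm_user/add event via notify_created_user below.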
    notify_created_user(user_profile)
    if bot_type is None:
        process_new_human_user(user_profile, prereg_user=prereg_user,
                               newsletter_data=newsletter_data,
                               default_stream_groups=default_stream_groups,
                               realm_creation=realm_creation)
    return user_profile

def do_activate_user(user_profile: UserProfile,
                     acting_user: Optional[UserProfile]=None) -> None:
    user_profile.is_active = True
    user_profile.is_mirror_dummy = False
    user_profile.set_unusable_password()
    user_profile.date_joined = timezone_now()
    user_profile.tos_version = settings.TOS_VERSION
    user_profile.save(update_fields=["is_active", "date_joined", "password",
                                     "is_mirror_dummy", "tos_version"])

    event_time = user_profile.date_joined
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)

    notify_created_user(user_profile)

def do_reactivate_user(user_profile: UserProfile,
                       acting_user: Optional[UserProfile]=None) -> None:
    # Unlike do_activate_user, this is meant for re-activating existing users,
    # so it doesn't reset their password, etc.
    user_profile.is_active = True
    user_profile.save(update_fields=["is_active"])

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)

    notify_created_user(user_profile)

    if user_profile.is_bot:
        notify_created_bot(user_profile)

def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
    return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)

def do_set_realm_property(realm: Realm, name: str, value: Any,
                          acting_user: Optional[UserProfile] = None) -> None:
    """Takes in a realm object, the name of an attribute to update, the
    value to update, and the user who initiated the update.
    """
    property_type = Realm.property_types[name]
    assert isinstance(value, property_type), (
        f'Cannot update {name}: {value} is not an instance of {property_type}')

    old_value = getattr(realm, name)
    setattr(realm, name, value)
    realm.save(update_fields=[name])

    event = dict(
        type='realm',
        op='update',
        property=name,
        value=value,
    )
    send_event(realm, event, active_user_ids(realm.id))

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
        event_time=event_time, acting_user=acting_user,
        extra_data=ujson.dumps({
            RealmAuditLog.OLD_VALUE: {'property': name, 'value': old_value},
            RealmAuditLog.NEW_VALUE: {'property': name, 'value': value}
        }))

    if name == "email_address_visibility":
        if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
            # We use real email addresses on UserProfile.email only if
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so changes
            # between the other visibility values don't require updating
            # that field, and we can save the work and return here.
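            # For example (illustrative): switching between the ADMINS and
            # MEMBERS visibility settings keeps UserProfile.email a dummy
            # address either way, so no per-user email updates are needed.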
return user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False) for user_profile in user_profiles: user_profile.email = get_display_email_address(user_profile, realm) # TODO: Design a bulk event for this or force-reload all clients send_user_email_update_event(user_profile) UserProfile.objects.bulk_update(user_profiles, ['email']) for user_profile in user_profiles: flush_user_profile(sender=UserProfile, instance=user_profile) def do_set_realm_authentication_methods(realm: Realm, authentication_methods: Dict[str, bool], acting_user: Optional[UserProfile]=None) -> None: old_value = realm.authentication_methods_dict() for key, value in list(authentication_methods.items()): index = getattr(realm.authentication_methods, key).number realm.authentication_methods.set_bit(index, int(value)) realm.save(update_fields=['authentication_methods']) updated_value = realm.authentication_methods_dict() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=timezone_now(), acting_user=acting_user, extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: {'property': 'authentication_methods', 'value': old_value}, RealmAuditLog.NEW_VALUE: {'property': 'authentication_methods', 'value': updated_value} })) event = dict( type="realm", op="update_dict", property='default', data=dict(authentication_methods=updated_value), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_message_editing(realm: Realm, allow_message_editing: bool, message_content_edit_limit_seconds: int, allow_community_topic_editing: bool) -> None: realm.allow_message_editing = allow_message_editing realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds realm.allow_community_topic_editing = allow_community_topic_editing realm.save(update_fields=['allow_message_editing', 'allow_community_topic_editing', 'message_content_edit_limit_seconds', ], ) event = dict( type="realm", op="update_dict", property="default", data=dict(allow_message_editing=allow_message_editing, message_content_edit_limit_seconds=message_content_edit_limit_seconds, allow_community_topic_editing=allow_community_topic_editing), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_message_deleting(realm: Realm, message_content_delete_limit_seconds: int) -> None: realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds realm.save(update_fields=['message_content_delete_limit_seconds']) event = dict( type="realm", op="update_dict", property="default", data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds), ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None: realm.notifications_stream = stream realm.save(update_fields=['notifications_stream']) event = dict( type="realm", op="update", property="notifications_stream_id", value=stream_id, ) send_event(realm, event, active_user_ids(realm.id)) def do_set_realm_signup_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None: realm.signup_notifications_stream = stream realm.save(update_fields=['signup_notifications_stream']) event = dict( type="realm", op="update", property="signup_notifications_stream_id", value=stream_id, ) send_event(realm, event, active_user_ids(realm.id)) def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None: """ Deactivate this realm. 
Do NOT deactivate the users -- we need to be able to tell the difference between users that were intentionally deactivated, e.g. by a realm admin, and users who can't currently use Zulip because their realm has been deactivated. """ if realm.deactivated: return realm.deactivated = True realm.save(update_fields=["deactivated"]) if settings.BILLING_ENABLED: downgrade_now(realm) event_time = timezone_now() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time, acting_user=acting_user, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm), })) ScheduledEmail.objects.filter(realm=realm).delete() for user in active_humans_in_realm(realm): # Don't deactivate the users, but do delete their sessions so they get # bumped to the login screen, where they'll get a realm deactivation # notice when they try to log in. delete_user_sessions(user) event = dict(type="realm", op="deactivated", realm_id=realm.id) send_event(realm, event, active_user_ids(realm.id)) def do_reactivate_realm(realm: Realm) -> None: realm.deactivated = False realm.save(update_fields=["deactivated"]) event_time = timezone_now() RealmAuditLog.objects.create( realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm), })) def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None: realm.string_id = new_subdomain realm.save(update_fields=["string_id"]) def do_scrub_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None: users = UserProfile.objects.filter(realm=realm) for user in users: do_delete_messages_by_sender(user) do_delete_avatar_image(user, acting_user=acting_user) user.full_name = f"Scrubbed {generate_key()[:15]}" scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}" user.email = scrubbed_email user.delivery_email = scrubbed_email user.save(update_fields=["full_name", "email", "delivery_email"]) do_remove_realm_custom_profile_fields(realm) Attachment.objects.filter(realm=realm).delete() RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(), acting_user=acting_user, event_type=RealmAuditLog.REALM_SCRUBBED) def do_deactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None, _cascade: bool=True) -> None: if not user_profile.is_active: return if user_profile.realm.is_zephyr_mirror_realm: # nocoverage # For zephyr mirror users, we need to make them a mirror dummy # again; otherwise, other users won't get the correct behavior # when trying to send messages to this person inside Zulip. # # Ideally, we need to also ensure their zephyr mirroring bot # isn't running, but that's a separate issue. 
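        # (Assumed semantics: a mirror dummy is a placeholder account that
        # can receive mirrored traffic but cannot log in, so restoring the
        # flag keeps mirrored conversations working after deactivation.)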
user_profile.is_mirror_dummy = True user_profile.is_active = False user_profile.save(update_fields=["is_active"]) delete_user_sessions(user_profile) clear_scheduled_emails([user_profile.id]) event_time = timezone_now() RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time, extra_data=ujson.dumps({ RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'], user_profile.is_bot, event_time, increment=-1) if settings.BILLING_ENABLED: update_license_ledger_if_needed(user_profile.realm, event_time) event = dict(type="realm_user", op="remove", person=dict(user_id=user_profile.id, full_name=user_profile.full_name)) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) if user_profile.is_bot: event = dict(type="realm_bot", op="remove", bot=dict(user_id=user_profile.id, full_name=user_profile.full_name)) send_event(user_profile.realm, event, bot_owner_user_ids(user_profile)) if _cascade: bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile) for profile in bot_profiles: do_deactivate_user(profile, acting_user=acting_user, _cascade=False) def do_deactivate_stream(stream: Stream, log: bool=True, acting_user: Optional[UserProfile]=None) -> None: # Get the affected user ids *before* we deactivate everybody. affected_user_ids = can_access_stream_user_ids(stream) get_active_subscriptions_for_stream_id(stream.id).update(active=False) was_invite_only = stream.invite_only stream.deactivated = True stream.invite_only = True # Preserve as much as possible the original stream name while giving it a # special prefix that both indicates that the stream is deactivated and # frees up the original name for reuse. old_name = stream.name new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH] for i in range(20): if stream_name_in_use(new_name, stream.realm_id): # This stream has already been deactivated, keep prepending !s until # we have a unique stream name or you've hit a rename limit. new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH] else: break # If you don't have a unique name at this point, this will fail later in the # code path. stream.name = new_name[:Stream.MAX_NAME_LENGTH] stream.save(update_fields=['name', 'deactivated', 'invite_only']) # If this is a default stream, remove it, properly sending a # notification to browser clients. if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists(): do_remove_default_stream(stream) default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id) for group in default_stream_groups_for_stream: do_remove_streams_from_default_stream_group(stream.realm, group, [stream]) # Remove the old stream information from remote cache. 
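    # Illustrative sequence (stream name assumed): deactivating "social"
    # renames it to "!DEACTIVATED:social" above, so the cache entry keyed on
    # the old name must be deleted here to avoid stale lookups.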
old_cache_key = get_stream_cache_key(old_name, stream.realm_id) cache_delete(old_cache_key) stream_dict = stream.to_dict() stream_dict.update(dict(name=old_name, invite_only=was_invite_only)) event = dict(type="stream", op="delete", streams=[stream_dict]) send_event(stream.realm, event, affected_user_ids) event_time = timezone_now() RealmAuditLog.objects.create(realm=stream.realm, acting_user=acting_user, modified_stream=stream, event_type=RealmAuditLog.STREAM_DEACTIVATED, event_time=event_time) def send_user_email_update_event(user_profile: UserProfile) -> None: payload = dict(user_id=user_profile.id, new_email=user_profile.email) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None: delete_user_profile_caches([user_profile]) user_profile.delivery_email = new_email if user_profile.email_address_is_realm_public(): user_profile.email = new_email user_profile.save(update_fields=["email", "delivery_email"]) else: user_profile.save(update_fields=["delivery_email"]) # We notify just the target user (and eventually org admins, only # when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS) # about their new delivery email, since that field is private. payload = dict(user_id=user_profile.id, delivery_email=new_email) event = dict(type='realm_user', op='update', person=payload) send_event(user_profile.realm, event, [user_profile.id]) if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR: # If the user is using Gravatar to manage their email address, # their Gravatar just changed, and we need to notify other # clients. notify_avatar_url_change(user_profile) if user_profile.email_address_is_realm_public(): # Additionally, if we're also changing the publicly visible # email, we send a new_email event as well. 
send_user_email_update_event(user_profile) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED, event_time=event_time) def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None: old_email = user_profile.delivery_email obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email, user_profile=user_profile, realm=user_profile.realm) activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE) from zerver.context_processors import common_context context = common_context(user_profile) context.update({ 'old_email': old_email, 'new_email': new_email, 'activate_url': activation_url, }) language = user_profile.default_language send_email('zerver/emails/confirm_new_email', to_emails=[new_email], from_name=FromAddress.security_email_from_name(language=language), from_address=FromAddress.tokenized_no_reply_address(), language=language, context=context, realm=user_profile.realm) def compute_irc_user_fullname(email: str) -> str: return email.split("@")[0] + " (IRC)" def compute_jabber_user_fullname(email: str) -> str: return email.split("@")[0] + " (XMPP)" @cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email), timeout=3600*24*7) def create_mirror_user_if_needed(realm: Realm, email: str, email_to_fullname: Callable[[str], str]) -> UserProfile: try: return get_user_by_delivery_email(email, realm) except UserProfile.DoesNotExist: try: # Forge a user for this person return create_user( email=email, password=None, realm=realm, full_name=email_to_fullname(email), active=False, is_mirror_dummy=True, ) except IntegrityError: return get_user_by_delivery_email(email, realm) def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None: welcome_bot = get_system_bot(settings.WELCOME_BOT) human_recipient_id = message['message'].sender.recipient_id if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2: content = ( _("Congratulations on your first reply!") + " " ":tada:" "\n" "\n" + _("Feel free to continue using this space to practice your new messaging " "skills. 
Or, try clicking on some of the stream names to your left!") ) internal_send_private_message( message['realm'], welcome_bot, message['message'].sender, content) def render_incoming_message(message: Message, content: str, user_ids: Set[int], realm: Realm, mention_data: Optional[MentionData]=None, email_gateway: bool=False) -> str: realm_alert_words_automaton = get_alert_word_automaton(realm) try: rendered_content = render_markdown( message=message, content=content, realm=realm, realm_alert_words_automaton = realm_alert_words_automaton, mention_data=mention_data, email_gateway=email_gateway, ) except MarkdownRenderingException: raise JsonableError(_('Unable to render message')) return rendered_content class RecipientInfoResult(TypedDict): active_user_ids: Set[int] push_notify_user_ids: Set[int] stream_email_user_ids: Set[int] stream_push_user_ids: Set[int] wildcard_mention_user_ids: Set[int] um_eligible_user_ids: Set[int] long_term_idle_user_ids: Set[int] default_bot_user_ids: Set[int] service_bot_tuples: List[Tuple[int, int]] def get_recipient_info(recipient: Recipient, sender_id: int, stream_topic: Optional[StreamTopicTarget], possibly_mentioned_user_ids: AbstractSet[int]=set(), possible_wildcard_mention: bool=True) -> RecipientInfoResult: stream_push_user_ids: Set[int] = set() stream_email_user_ids: Set[int] = set() wildcard_mention_user_ids: Set[int] = set() if recipient.type == Recipient.PERSONAL: # The sender and recipient may be the same id, so # de-duplicate using a set. message_to_user_ids = list({recipient.type_id, sender_id}) assert(len(message_to_user_ids) in [1, 2]) elif recipient.type == Recipient.STREAM: # Anybody calling us w/r/t a stream message needs to supply # stream_topic. We may eventually want to have different versions # of this function for different message types. assert(stream_topic is not None) user_ids_muting_topic = stream_topic.user_ids_muting_topic() subscription_rows = stream_topic.get_active_subscriptions().annotate( user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'), user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'), user_profile_wildcard_mentions_notify=F( 'user_profile__wildcard_mentions_notify'), ).values( 'user_profile_id', 'push_notifications', 'email_notifications', 'wildcard_mentions_notify', 'user_profile_email_notifications', 'user_profile_push_notifications', 'user_profile_wildcard_mentions_notify', 'is_muted', ).order_by('user_profile_id') message_to_user_ids = [ row['user_profile_id'] for row in subscription_rows ] def should_send(setting: str, row: Dict[str, Any]) -> bool: # This implements the structure that the UserProfile stream notification settings # are defaults, which can be overridden by the stream-level settings (if those # values are not null). 
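        # Example (illustrative): a row with push_notifications=None falls
        # through to the user-level user_profile_push_notifications default,
        # while an explicit True/False on the subscription row wins.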
if row['is_muted']: return False if row['user_profile_id'] in user_ids_muting_topic: return False if row[setting] is not None: return row[setting] return row['user_profile_' + setting] stream_push_user_ids = { row['user_profile_id'] for row in subscription_rows # Note: muting a stream overrides stream_push_notify if should_send('push_notifications', row) } stream_email_user_ids = { row['user_profile_id'] for row in subscription_rows # Note: muting a stream overrides stream_email_notify if should_send('email_notifications', row) } if possible_wildcard_mention: # If there's a possible wildcard mention, we need to # determine which users would receive a wildcard mention # notification for this message should the message indeed # contain a wildcard mention. # # We don't have separate values for push/email # notifications here; at this stage, we're just # determining whether this wildcard mention should be # treated as a mention (and follow the user's mention # notification preferences) or a normal message. wildcard_mention_user_ids = { row['user_profile_id'] for row in subscription_rows if should_send("wildcard_mentions_notify", row) } elif recipient.type == Recipient.HUDDLE: message_to_user_ids = get_huddle_user_ids(recipient) else: raise ValueError('Bad recipient type') message_to_user_id_set = set(message_to_user_ids) user_ids = set(message_to_user_id_set) # Important note: Because we haven't rendered markdown yet, we # don't yet know which of these possibly-mentioned users was # actually mentioned in the message (in other words, the # mention syntax might have been in a code block or otherwise # escaped). `get_ids_for` will filter these extra user rows # for our data structures not related to bots user_ids |= possibly_mentioned_user_ids if user_ids: query = UserProfile.objects.filter( is_active=True, ).values( 'id', 'enable_online_push_notifications', 'is_bot', 'bot_type', 'long_term_idle', ) # query_for_ids is fast highly optimized for large queries, and we # need this codepath to be fast (it's part of sending messages) query = query_for_ids( query=query, user_ids=sorted(list(user_ids)), field='id', ) rows = list(query) else: # TODO: We should always have at least one user_id as a recipient # of any message we send. Right now the exception to this # rule is `notify_new_user`, which, at least in a possibly # contrived test scenario, can attempt to send messages # to an inactive bot. When we plug that hole, we can avoid # this `else` clause and just `assert(user_ids)`. # # UPDATE: It's February 2020 (and a couple years after the above # comment was written). We have simplified notify_new_user # so that it should be a little easier to reason about. # There is currently some cleanup to how we handle cross # realm bots that is still under development. Once that # effort is complete, we should be able to address this # to-do. rows = [] def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]: """Only includes users on the explicit message to line""" return { row['id'] for row in rows if f(row) } & message_to_user_id_set def is_service_bot(row: Dict[str, Any]) -> bool: return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES) active_user_ids = get_ids_for(lambda r: True) push_notify_user_ids = get_ids_for( lambda r: r['enable_online_push_notifications'], ) # Service bots don't get UserMessage rows. 
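    # (They are instead routed through service_bot_tuples and the queue
    # events built by get_service_bot_events below.)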
um_eligible_user_ids = get_ids_for( lambda r: not is_service_bot(r), ) long_term_idle_user_ids = get_ids_for( lambda r: r['long_term_idle'], ) # These two bot data structures need to filter from the full set # of users who either are receiving the message or might have been # mentioned in it, and so can't use get_ids_for. # # Further in the do_send_messages code path, once # `mentioned_user_ids` has been computed via markdown, we'll filter # these data structures for just those users who are either a # direct recipient or were mentioned; for now, we're just making # sure we have the data we need for that without extra database # queries. default_bot_user_ids = { row['id'] for row in rows if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT } service_bot_tuples = [ (row['id'], row['bot_type']) for row in rows if is_service_bot(row) ] info: RecipientInfoResult = dict( active_user_ids=active_user_ids, push_notify_user_ids=push_notify_user_ids, stream_push_user_ids=stream_push_user_ids, stream_email_user_ids=stream_email_user_ids, wildcard_mention_user_ids=wildcard_mention_user_ids, um_eligible_user_ids=um_eligible_user_ids, long_term_idle_user_ids=long_term_idle_user_ids, default_bot_user_ids=default_bot_user_ids, service_bot_tuples=service_bot_tuples, ) return info def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]], mentioned_user_ids: Set[int], active_user_ids: Set[int], recipient_type: int) -> Dict[str, List[Dict[str, Any]]]: event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list) # Avoid infinite loops by preventing messages sent by bots from generating # Service events. if sender.is_bot: return event_dict def maybe_add_event(user_profile_id: int, bot_type: int) -> None: if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: queue_name = 'outgoing_webhooks' elif bot_type == UserProfile.EMBEDDED_BOT: queue_name = 'embedded_bots' else: logging.error( 'Unexpected bot_type for Service bot id=%s: %s', user_profile_id, bot_type, ) return is_stream = (recipient_type == Recipient.STREAM) # Important note: service_bot_tuples may contain service bots # who were not actually mentioned in the message (e.g. if # mention syntax for that bot appeared in a code block). # Thus, it is important to filter any users who aren't part of # either mentioned_user_ids (the actual mentioned users) or # active_user_ids (the actual recipients). # # So even though this is implied by the logic below, we filter # these not-actually-mentioned users here, to help keep this # function future-proof. 
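        # Concrete example (illustrative): a stream message mentioning an
        # outgoing-webhook bot queues an 'outgoing_webhooks' event with
        # trigger='mention'; a PM to an embedded bot queues 'embedded_bots'
        # with trigger='private_message'.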
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids: return # Mention triggers, for stream messages if is_stream and user_profile_id in mentioned_user_ids: trigger = 'mention' # PM triggers for personal and huddle messages elif (not is_stream) and (user_profile_id in active_user_ids): trigger = 'private_message' else: return event_dict[queue_name].append({ 'trigger': trigger, 'user_profile_id': user_profile_id, }) for user_profile_id, bot_type in service_bot_tuples: maybe_add_event( user_profile_id=user_profile_id, bot_type=bot_type, ) return event_dict def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]: scheduled_messages: List[ScheduledMessage] = [] for message in messages: scheduled_message = ScheduledMessage() scheduled_message.sender = message['message'].sender scheduled_message.recipient = message['message'].recipient topic_name = message['message'].topic_name() scheduled_message.set_topic_name(topic_name=topic_name) scheduled_message.content = message['message'].content scheduled_message.sending_client = message['message'].sending_client scheduled_message.stream = message['stream'] scheduled_message.realm = message['realm'] scheduled_message.scheduled_timestamp = message['deliver_at'] if message['delivery_type'] == 'send_later': scheduled_message.delivery_type = ScheduledMessage.SEND_LATER elif message['delivery_type'] == 'remind': scheduled_message.delivery_type = ScheduledMessage.REMIND scheduled_messages.append(scheduled_message) ScheduledMessage.objects.bulk_create(scheduled_messages) return [scheduled_message.id for scheduled_message in scheduled_messages] def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]], email_gateway: bool=False, mark_as_read: Sequence[int]=[]) -> List[int]: """See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html for high-level documentation on this subsystem. 
""" # Filter out messages which didn't pass internal_prep_message properly messages = [message for message in messages_maybe_none if message is not None] # Filter out zephyr mirror anomalies where the message was already sent already_sent_ids: List[int] = [] new_messages: List[MutableMapping[str, Any]] = [] for message in messages: if isinstance(message['message'], int): already_sent_ids.append(message['message']) else: new_messages.append(message) messages = new_messages links_for_embed: Set[str] = set() # For consistency, changes to the default values for these gets should also be applied # to the default args in do_send_message for message in messages: message['rendered_content'] = message.get('rendered_content', None) message['stream'] = message.get('stream', None) message['local_id'] = message.get('local_id', None) message['sender_queue_id'] = message.get('sender_queue_id', None) message['realm'] = message.get('realm', message['message'].sender.realm) mention_data = MentionData( realm_id=message['realm'].id, content=message['message'].content, ) message['mention_data'] = mention_data if message['message'].is_stream_message(): stream_id = message['message'].recipient.type_id stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget( stream_id=stream_id, topic_name=message['message'].topic_name(), ) else: stream_topic = None info = get_recipient_info( recipient=message['message'].recipient, sender_id=message['message'].sender_id, stream_topic=stream_topic, possibly_mentioned_user_ids=mention_data.get_user_ids(), possible_wildcard_mention=mention_data.message_has_wildcards(), ) message['active_user_ids'] = info['active_user_ids'] message['push_notify_user_ids'] = info['push_notify_user_ids'] message['stream_push_user_ids'] = info['stream_push_user_ids'] message['stream_email_user_ids'] = info['stream_email_user_ids'] message['um_eligible_user_ids'] = info['um_eligible_user_ids'] message['long_term_idle_user_ids'] = info['long_term_idle_user_ids'] message['default_bot_user_ids'] = info['default_bot_user_ids'] message['service_bot_tuples'] = info['service_bot_tuples'] # Render our messages. assert message['message'].rendered_content is None rendered_content = render_incoming_message( message['message'], message['message'].content, message['active_user_ids'], message['realm'], mention_data=message['mention_data'], email_gateway=email_gateway, ) message['message'].rendered_content = rendered_content message['message'].rendered_content_version = markdown_version links_for_embed |= message['message'].links_for_preview # Add members of the mentioned user groups into `mentions_user_ids`. for group_id in message['message'].mentions_user_group_ids: members = message['mention_data'].get_group_members(group_id) message['message'].mentions_user_ids.update(members) # Only send data to Tornado about wildcard mentions if message # rendering determined the message had an actual wildcard # mention in it (and not e.g. wildcard mention syntax inside a # code block). if message['message'].mentions_wildcard: message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids'] else: message['wildcard_mention_user_ids'] = [] ''' Once we have the actual list of mentioned ids from message rendering, we can patch in "default bots" (aka normal bots) who were directly mentioned in this message as eligible to get UserMessage rows. 
''' mentioned_user_ids = message['message'].mentions_user_ids default_bot_user_ids = message['default_bot_user_ids'] mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids message['um_eligible_user_ids'] |= mentioned_bot_user_ids # Save the message receipts in the database user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict) with transaction.atomic(): Message.objects.bulk_create([message['message'] for message in messages]) # Claim attachments in message for message in messages: if do_claim_attachments(message['message'], message['message'].potential_attachment_path_ids): message['message'].has_attachment = True message['message'].save(update_fields=['has_attachment']) ums: List[UserMessageLite] = [] for message in messages: # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows; # they will be processed later. mentioned_user_ids = message['message'].mentions_user_ids user_messages = create_user_messages( message=message['message'], um_eligible_user_ids=message['um_eligible_user_ids'], long_term_idle_user_ids=message['long_term_idle_user_ids'], stream_push_user_ids = message['stream_push_user_ids'], stream_email_user_ids = message['stream_email_user_ids'], mentioned_user_ids=mentioned_user_ids, mark_as_read=mark_as_read, ) for um in user_messages: user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list() ums.extend(user_messages) message['message'].service_queue_events = get_service_bot_events( sender=message['message'].sender, service_bot_tuples=message['service_bot_tuples'], mentioned_user_ids=mentioned_user_ids, active_user_ids=message['active_user_ids'], recipient_type=message['message'].recipient.type, ) bulk_insert_ums(ums) for message in messages: do_widget_post_save_actions(message) for message in messages: realm_id: Optional[int] = None if message['message'].is_stream_message(): if message['stream'] is None: stream_id = message['message'].recipient.type_id message['stream'] = Stream.objects.select_related().get(id=stream_id) assert message['stream'] is not None # assert needed because stubs for django are missing realm_id = message['stream'].realm_id # Deliver events to the real-time push system, as well as # enqueuing any additional processing triggered by the message. wide_message_dict = MessageDict.wide_dict(message['message'], realm_id) user_flags = user_message_flags.get(message['message'].id, {}) sender = message['message'].sender message_type = wide_message_dict['type'] presence_idle_user_ids = get_active_presence_idle_user_ids( realm=sender.realm, sender_id=sender.id, message_type=message_type, active_user_ids=message['active_user_ids'], user_flags=user_flags, ) event = dict( type='message', message=message['message'].id, message_dict=wide_message_dict, presence_idle_user_ids=presence_idle_user_ids, ) ''' TODO: We may want to limit user_ids to only those users who have UserMessage rows, if only for minor performance reasons. For now we queue events for all subscribers/sendees of the message, since downstream code may still do notifications that don't require UserMessage rows. Our automated tests have gotten better on this codepath, but we may have coverage gaps, so we should be careful about changing the next line. 
''' user_ids = message['active_user_ids'] | set(user_flags.keys()) users = [ dict( id=user_id, flags=user_flags.get(user_id, []), always_push_notify=(user_id in message['push_notify_user_ids']), stream_push_notify=(user_id in message['stream_push_user_ids']), stream_email_notify=(user_id in message['stream_email_user_ids']), wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']), ) for user_id in user_ids ] if message['message'].is_stream_message(): # Note: This is where authorization for single-stream # get_updates happens! We only attach stream data to the # notify new_message request if it's a public stream, # ensuring that in the tornado server, non-public stream # messages are only associated to their subscribed users. assert message['stream'] is not None # assert needed because stubs for django are missing if message['stream'].is_public(): event['realm_id'] = message['stream'].realm_id event['stream_name'] = message['stream'].name if message['stream'].invite_only: event['invite_only'] = True if message['stream'].first_message_id is None: message['stream'].first_message_id = message['message'].id message['stream'].save(update_fields=["first_message_id"]) if message['local_id'] is not None: event['local_id'] = message['local_id'] if message['sender_queue_id'] is not None: event['sender_queue_id'] = message['sender_queue_id'] send_event(message['realm'], event, users) if links_for_embed: event_data = { 'message_id': message['message'].id, 'message_content': message['message'].content, 'message_realm_id': message['realm'].id, 'urls': links_for_embed} queue_json_publish('embed_links', event_data) if message['message'].recipient.type == Recipient.PERSONAL: welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id if (welcome_bot_id in message['active_user_ids'] and welcome_bot_id != message['message'].sender_id): send_welcome_bot_response(message) for queue_name, events in message['message'].service_queue_events.items(): for event in events: queue_json_publish( queue_name, { "message": wide_message_dict, "trigger": event['trigger'], "user_profile_id": event["user_profile_id"], }, ) # Note that this does not preserve the order of message ids # returned. In practice, this shouldn't matter, as we only # mirror single zephyr messages at a time and don't otherwise # intermingle sending zephyr messages with other messages. return already_sent_ids + [message['message'].id for message in messages] class UserMessageLite: ''' The Django ORM is too slow for bulk operations. This class is optimized for the simple use case of inserting a bunch of rows into zerver_usermessage. 
''' def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None: self.user_profile_id = user_profile_id self.message_id = message_id self.flags = flags def flags_list(self) -> List[str]: return UserMessage.flags_list_for_flags(self.flags) def create_user_messages(message: Message, um_eligible_user_ids: AbstractSet[int], long_term_idle_user_ids: AbstractSet[int], stream_push_user_ids: AbstractSet[int], stream_email_user_ids: AbstractSet[int], mentioned_user_ids: AbstractSet[int], mark_as_read: Sequence[int] = []) -> List[UserMessageLite]: ums_to_create = [] for user_profile_id in um_eligible_user_ids: um = UserMessageLite( user_profile_id=user_profile_id, message_id=message.id, flags=0, ) ums_to_create.append(um) # These properties on the Message are set via # render_markdown by code in the markdown inline patterns wildcard = message.mentions_wildcard ids_with_alert_words = message.user_ids_with_alert_words for um in ums_to_create: if (um.user_profile_id == message.sender.id and message.sent_by_human()) or \ um.user_profile_id in mark_as_read: um.flags |= UserMessage.flags.read if wildcard: um.flags |= UserMessage.flags.wildcard_mentioned if um.user_profile_id in mentioned_user_ids: um.flags |= UserMessage.flags.mentioned if um.user_profile_id in ids_with_alert_words: um.flags |= UserMessage.flags.has_alert_word if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]: um.flags |= UserMessage.flags.is_private # For long_term_idle (aka soft-deactivated) users, we are allowed # to optimize by lazily not creating UserMessage rows that would # have the default 0 flag set (since the soft-reactivation logic # knows how to create those when the user comes back). We need to # create the UserMessage rows for these long_term_idle users # non-lazily in a few cases: # # * There are nonzero flags (e.g. the user was mentioned), since # that case is rare and this saves a lot of complexity in # soft-reactivation. # # * If the user is going to be notified (e.g. they get push/email # notifications for every message on a stream), since in that # case the notifications code will call `access_message` on the # message to re-verify permissions, and for private streams, # will get an error if the UserMessage row doesn't exist yet. # # See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation # for details on this system. user_messages = [] for um in ums_to_create: if (um.user_profile_id in long_term_idle_user_ids and um.user_profile_id not in stream_push_user_ids and um.user_profile_id not in stream_email_user_ids and message.is_stream_message() and int(um.flags) == 0): continue user_messages.append(um) return user_messages def bulk_insert_ums(ums: List[UserMessageLite]) -> None: ''' Doing bulk inserts this way is much faster than using Django, since we don't have any ORM overhead. Profiling with 1000 users shows a speedup of 0.436 -> 0.027 seconds, so we're talking about a 15x speedup. 
    '''
    if not ums:
        return

    vals = [
        (um.user_profile_id, um.message_id, um.flags)
        for um in ums
    ]
    query = SQL('''
        INSERT into
            zerver_usermessage (user_profile_id, message_id, flags)
        VALUES %s
    ''')

    with connection.cursor() as cursor:
        execute_values(cursor.cursor, query, vals)

def do_add_submessage(realm: Realm,
                      sender_id: int,
                      message_id: int,
                      msg_type: str,
                      content: str,
                      ) -> None:
    submessage = SubMessage(
        sender_id=sender_id,
        message_id=message_id,
        msg_type=msg_type,
        content=content,
    )
    submessage.save()

    event = dict(
        type="submessage",
        msg_type=msg_type,
        message_id=message_id,
        submessage_id=submessage.id,
        sender_id=sender_id,
        content=content,
    )
    ums = UserMessage.objects.filter(message_id=message_id)
    target_user_ids = [um.user_profile_id for um in ums]

    send_event(realm, event, target_user_ids)

def notify_reaction_update(user_profile: UserProfile, message: Message,
                           reaction: Reaction, op: str) -> None:
    user_dict = {'user_id': user_profile.id,
                 'email': user_profile.email,
                 'full_name': user_profile.full_name}

    event: Dict[str, Any] = {
        'type': 'reaction',
        'op': op,
        'user_id': user_profile.id,
        # TODO: We plan to remove this redundant user_dict object once
        # clients are updated to support accessing the user via user_id.
        # See https://github.com/zulip/zulip/pull/14711 for details.
        'user': user_dict,
        'message_id': message.id,
        'emoji_name': reaction.emoji_name,
        'emoji_code': reaction.emoji_code,
        'reaction_type': reaction.reaction_type,
    }

    # Update the cached message, since a new reaction was added.
    update_to_dict_cache([message])

    # Recipients for message update events, including reactions, are
    # everyone who got the original message.  This means reactions
    # won't live-update in preview narrows, but it's the right
    # performance tradeoff, since otherwise we'd need to send all
    # reactions to public stream messages to every browser for every
    # client in the organization, which doesn't scale.
    #
    # However, to ensure that reactions do live-update for any user
    # who has actually participated in reacting to a message, we add a
    # "historical" UserMessage row for any user who reacts to a message,
    # subscribing them to future notifications.
    ums = UserMessage.objects.filter(message=message.id)
    send_event(user_profile.realm, event, [um.user_profile_id for um in ums])

def do_add_reaction(user_profile: UserProfile, message: Message,
                    emoji_name: str, emoji_code: str, reaction_type: str) -> None:
    reaction = Reaction(user_profile=user_profile, message=message,
                        emoji_name=emoji_name, emoji_code=emoji_code,
                        reaction_type=reaction_type)
    try:
        reaction.save()
    except django.db.utils.IntegrityError:  # nocoverage
        # This can happen when a race results in the check in views
        # code not catching an attempt to double-add a reaction, or
        # perhaps if the emoji_name/emoji_code mapping is busted.
raise JsonableError(_("Reaction already exists.")) notify_reaction_update(user_profile, message, reaction, "add") def do_remove_reaction(user_profile: UserProfile, message: Message, emoji_code: str, reaction_type: str) -> None: reaction = Reaction.objects.filter(user_profile=user_profile, message=message, emoji_code=emoji_code, reaction_type=reaction_type).get() reaction.delete() notify_reaction_update(user_profile, message, reaction, "remove") def do_send_typing_notification( realm: Realm, sender: UserProfile, recipient_user_profiles: List[UserProfile], operator: str) -> None: sender_dict = {'user_id': sender.id, 'email': sender.email} # Include a list of recipients in the event body to help identify where the typing is happening recipient_dicts = [{'user_id': profile.id, 'email': profile.email} for profile in recipient_user_profiles] event = dict( type='typing', op=operator, sender=sender_dict, recipients=recipient_dicts, ) # Only deliver the notification to active user recipients user_ids_to_notify = [ user.id for user in recipient_user_profiles if user.is_active ] send_event(realm, event, user_ids_to_notify) # check_send_typing_notification: # Checks the typing notification and sends it def check_send_typing_notification(sender: UserProfile, user_ids: List[int], operator: str) -> None: realm = sender.realm if len(user_ids) == 0: raise JsonableError(_('Missing parameter: \'to\' (recipient)')) elif operator not in ('start', 'stop'): raise JsonableError(_('Invalid \'op\' value (should be start or stop)')) ''' The next chunk of code will go away when we upgrade old mobile users away from versions of mobile that send emails. For the small number of very outdated mobile clients, we do double work here in terms of fetching users, but this structure reduces lots of other unnecessary duplicated code and will make it convenient to mostly delete code when we desupport old versions of the app.''' if sender.id not in user_ids: user_ids.append(sender.id) # If any of the user_ids being sent in are invalid, we will # just reject the whole request, since a partial list of user_ids # can create confusion related to huddles. Plus it's a good # sign that a client is confused (or possibly even malicious) if # we get bad user_ids. user_profiles = [] for user_id in user_ids: try: # We include cross-bot realms as possible recipients, # so that clients can know which huddle conversation # is relevant here. user_profile = get_user_by_id_in_realm_including_cross_realm( user_id, sender.realm) except UserProfile.DoesNotExist: raise JsonableError(_("Invalid user ID {}").format(user_id)) user_profiles.append(user_profile) do_send_typing_notification( realm=realm, sender=sender, recipient_user_profiles=user_profiles, operator=operator, ) def ensure_stream(realm: Realm, stream_name: str, invite_only: bool=False, stream_description: str="", acting_user: Optional[UserProfile]=None) -> Stream: return create_stream_if_needed(realm, stream_name, invite_only=invite_only, stream_description=stream_description, acting_user=acting_user)[0] def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile], forwarded_mirror_message: bool, forwarder_user_profile: Optional[UserProfile], sender: UserProfile) -> Recipient: # Avoid mutating the passed in list of recipient_profiles. 
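    # Resolution sketch (user names illustrative): [sender, alice] collapses
    # to alice's personal Recipient below, while [sender, alice, bob]
    # resolves to a huddle Recipient covering all three user ids.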
recipient_profiles_map = {} for user_profile in recipient_profiles: recipient_profiles_map[user_profile.id] = user_profile if forwarded_mirror_message: # In our mirroring integrations with some third-party # protocols, bots subscribed to the third-party protocol # forward to Zulip messages that they received in the # third-party service. The permissions model for that # forwarding is that users can only submit to Zulip private # messages they personally received, and here we do the check # for whether forwarder_user_profile is among the private # message recipients of the message. assert forwarder_user_profile is not None if forwarder_user_profile.id not in recipient_profiles_map: raise ValidationError(_("User not authorized for this query")) # If the private message is just between the sender and # another person, force it to be a personal internally if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map): del recipient_profiles_map[sender.id] assert len(recipient_profiles_map) != 0 if len(recipient_profiles_map) == 1: user_profile = list(recipient_profiles_map.values())[0] return user_profile.recipient # Otherwise, we need a huddle. Make sure the sender is included in huddle messages recipient_profiles_map[sender.id] = sender user_ids: Set[int] = {user_id for user_id in recipient_profiles_map} return get_huddle_recipient(user_ids) def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile], sender: UserProfile, allow_deactivated: bool=False) -> Sequence[UserProfile]: recipient_profiles_map: Dict[int, UserProfile] = {} # We exempt cross-realm bots from the check that all the recipients # are in the same realm. realms = set() if not is_cross_realm_bot_email(sender.email): realms.add(sender.realm_id) for user_profile in user_profiles: if (not user_profile.is_active and not user_profile.is_mirror_dummy and not allow_deactivated) or user_profile.realm.deactivated: raise ValidationError(_("'{email}' is no longer using Zulip.").format(email=user_profile.email)) recipient_profiles_map[user_profile.id] = user_profile if not is_cross_realm_bot_email(user_profile.email): realms.add(user_profile.realm_id) if len(realms) > 1: raise ValidationError(_("You can't send private messages outside of your organization.")) return list(recipient_profiles_map.values()) def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool, forwarder_user_profile: Optional[UserProfile], sender: UserProfile, allow_deactivated: bool=False) -> Recipient: recipient_profiles = validate_recipient_user_profiles(user_profiles, sender, allow_deactivated=allow_deactivated) return get_recipient_from_user_profiles(recipient_profiles, forwarded_mirror_message, forwarder_user_profile, sender) def already_sent_mirrored_message_id(message: Message) -> Optional[int]: if message.recipient.type == Recipient.HUDDLE: # For huddle messages, we use a 10-second window because the # timestamps aren't guaranteed to actually match between two # copies of the same message. 
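    # (Non-huddle mirrored messages get a zero-second window below, i.e. an
    # exact date_sent match is required.)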
time_window = datetime.timedelta(seconds=10) else: time_window = datetime.timedelta(seconds=0) query = Message.objects.filter( sender=message.sender, recipient=message.recipient, content=message.content, sending_client=message.sending_client, date_sent__gte=message.date_sent - time_window, date_sent__lte=message.date_sent + time_window) messages = filter_by_exact_message_topic( query=query, message=message, ) if messages.exists(): return messages[0].id return None def extract_stream_indicator(s: str) -> Union[str, int]: # Users can pass stream name as either an id or a name, # and if they choose to pass a name, they may JSON encode # it for legacy reasons. try: data = ujson.loads(s) except (ValueError, TypeError): # If there was no JSON encoding, then we just # have a raw stream name. return s # We should stop supporting this odd use case # once we improve our documentation. if isinstance(data, list): if len(data) != 1: # nocoverage raise JsonableError(_("Expected exactly one stream")) data = data[0] if isinstance(data, str): # We had a JSON-encoded stream name. return data if isinstance(data, int): # We had a stream id. return data raise JsonableError(_("Invalid data type for stream")) def extract_private_recipients(s: str) -> Union[List[str], List[int]]: # We try to accept multiple incoming formats for recipients. # See test_extract_recipients() for examples of what we allow. try: data = ujson.loads(s) except (ValueError, TypeError): data = s if isinstance(data, str): data = data.split(',') if not isinstance(data, list): raise JsonableError(_("Invalid data type for recipients")) if not data: # We don't complain about empty message recipients here return data if isinstance(data[0], str): return get_validated_emails(data) if not isinstance(data[0], int): raise JsonableError(_("Invalid data type for recipients")) return get_validated_user_ids(data) def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]: for user_id in user_ids: if not isinstance(user_id, int): raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both.")) return list(set(user_ids)) def get_validated_emails(emails: Iterable[str]) -> List[str]: for email in emails: if not isinstance(email, str): raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both.")) return list(filter(bool, {email.strip() for email in emails})) def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str, topic: str, body: str, realm: Optional[Realm]=None) -> int: addressee = Addressee.for_stream_name(stream_name, topic) message = check_message(sender, client, addressee, body, realm) return do_send_messages([message])[0] def check_send_private_message(sender: UserProfile, client: Client, receiving_user: UserProfile, body: str) -> int: addressee = Addressee.for_user_profile(receiving_user) message = check_message(sender, client, addressee, body) return do_send_messages([message])[0] # check_send_message: # Returns the id of the sent message. Has same argspec as check_message. 
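# --- Illustrative sketch (hypothetical, not in the original file) of the
# recipient formats accepted above. extract_stream_indicator() returns either
# a stream name (str) or a stream id (int); extract_private_recipients()
# returns a list of emails or a list of user ids, never a mix, and
# deduplicates its input.
def _example_recipient_parsing() -> None:
    assert extract_stream_indicator('Denmark') == 'Denmark'    # raw stream name
    assert extract_stream_indicator('"social"') == 'social'    # JSON-encoded name
    assert extract_stream_indicator('42') == 42                # stream id
    assert extract_private_recipients('a@zulip.com,a@zulip.com') == ['a@zulip.com']
    assert sorted(extract_private_recipients('[3, 1, 3]')) == [1, 3]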
def check_send_message(sender: UserProfile, client: Client, message_type_name: str, message_to: Union[Sequence[int], Sequence[str]], topic_name: Optional[str], message_content: str, realm: Optional[Realm]=None, forged: bool=False, forged_timestamp: Optional[float]=None, forwarder_user_profile: Optional[UserProfile]=None, local_id: Optional[str]=None, sender_queue_id: Optional[str]=None, widget_content: Optional[str]=None) -> int: addressee = Addressee.legacy_build( sender, message_type_name, message_to, topic_name) message = check_message(sender, client, addressee, message_content, realm, forged, forged_timestamp, forwarder_user_profile, local_id, sender_queue_id, widget_content) return do_send_messages([message])[0] def check_schedule_message(sender: UserProfile, client: Client, message_type_name: str, message_to: Union[Sequence[str], Sequence[int]], topic_name: Optional[str], message_content: str, delivery_type: str, deliver_at: datetime.datetime, realm: Optional[Realm]=None, forwarder_user_profile: Optional[UserProfile]=None, ) -> int: addressee = Addressee.legacy_build( sender, message_type_name, message_to, topic_name) message = check_message(sender, client, addressee, message_content, realm=realm, forwarder_user_profile=forwarder_user_profile) message['deliver_at'] = deliver_at message['delivery_type'] = delivery_type recipient = message['message'].recipient if (delivery_type == 'remind' and (recipient.type != Recipient.STREAM and recipient.type_id != sender.id)): raise JsonableError(_("Reminders can only be set for streams.")) return do_schedule_messages([message])[0] def check_default_stream_group_name(group_name: str) -> None: if group_name.strip() == "": raise JsonableError(_("Invalid default stream group name '{}'").format(group_name)) if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH: raise JsonableError(_("Default stream group name too long (limit: {} characters)").format( DefaultStreamGroup.MAX_NAME_LENGTH, )) for i in group_name: if ord(i) == 0: raise JsonableError(_("Default stream group name '{}' contains NULL (0x00) characters.").format( group_name, )) def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile, realm: Realm, content: str) -> None: """ Sends a PM error notification to a bot's owner if one hasn't already been sent in the last 5 minutes. """ if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated: return if not sender.is_bot or sender.bot_owner is None: return # Don't send these notifications for cross-realm bot messages # (e.g. from EMAIL_GATEWAY_BOT) since the owner for # EMAIL_GATEWAY_BOT is probably the server administrator, not # the owner of the bot who could potentially fix the problem. if sender.realm != realm: return # We warn the user once every 5 minutes to avoid a flood of # PMs on a misconfigured integration, re-using the # UserProfile.last_reminder field, which is not used for bots. 
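# --- Hedged sketch (not part of the original code): the check_send_* entry
# points above all funnel through check_message() and do_send_messages().
# `sender`, `client`, and `recipient` are illustrative placeholders.
def _example_send_paths(sender: UserProfile, client: Client,
                        recipient: UserProfile) -> None:
    # Stream message, addressed by stream name and topic.
    check_send_stream_message(sender, client, 'Denmark', 'castles', 'hello')
    # One-to-one private message.
    check_send_private_message(sender, client, recipient, 'hi there')
    # Generic entry point: message type, recipient ids, topic (None for PMs), body.
    check_send_message(sender, client, 'private', [recipient.id], None, 'hi again')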
last_reminder = sender.last_reminder waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD) if last_reminder and timezone_now() - last_reminder <= waitperiod: return internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT), sender.bot_owner, content) sender.last_reminder = timezone_now() sender.save(update_fields=['last_reminder']) def send_pm_if_empty_stream(stream: Optional[Stream], realm: Realm, sender: UserProfile, stream_name: Optional[str]=None, stream_id: Optional[int]=None) -> None: """If a bot sends a message to a stream that doesn't exist or has no subscribers, sends a notification to the bot owner (if not a cross-realm bot) so that the owner can correct the issue.""" if not sender.is_bot or sender.bot_owner is None: return arg_dict = { "bot_identity": f"`{sender.delivery_email}`", "stream_id": stream_id, "stream_name": f"#**{stream_name}**", "new_stream_link": "#streams/new", } if sender.bot_owner is not None: with override_language(sender.bot_owner.default_language): if stream is None: if stream_id is not None: content = _("Your bot {bot_identity} tried to send a message to stream ID " "{stream_id}, but there is no stream with that ID.").format(**arg_dict) else: assert(stream_name is not None) content = _("Your bot {bot_identity} tried to send a message to stream " "{stream_name}, but that stream does not exist. " "Click [here]({new_stream_link}) to create it.").format(**arg_dict) else: if num_subscribers_for_stream_id(stream.id) > 0: return content = _("Your bot {bot_identity} tried to send a message to " "stream {stream_name}. The stream exists but " "does not have any subscribers.").format(**arg_dict) send_rate_limited_pm_notification_to_bot_owner(sender, realm, content) def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm, sender: UserProfile) -> Stream: stream_name = stream_name.strip() check_stream_name(stream_name) try: stream = get_stream(stream_name, realm) send_pm_if_empty_stream(stream, realm, sender) except Stream.DoesNotExist: send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name) raise StreamDoesNotExistError(escape(stream_name)) return stream def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm, sender: UserProfile) -> Stream: try: stream = get_stream_by_id_in_realm(stream_id, realm) send_pm_if_empty_stream(stream, realm, sender) except Stream.DoesNotExist: send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id) raise StreamWithIDDoesNotExistError(stream_id) return stream def check_private_message_policy(realm: Realm, sender: UserProfile, user_profiles: Sequence[UserProfile]) -> None: if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED: if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot): # We allow PMs only between users and bots, to avoid # breaking the tutorial as well as automated # notifications from system bots to users. return raise JsonableError(_("Private messages are disabled in this organization.")) # check_message: # Returns message ready for sending with do_send_message on success or the error message (string) on error. 
def check_message(sender: UserProfile, client: Client, addressee: Addressee, message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False, forged_timestamp: Optional[float]=None, forwarder_user_profile: Optional[UserProfile]=None, local_id: Optional[str]=None, sender_queue_id: Optional[str]=None, widget_content: Optional[str]=None) -> Dict[str, Any]: """See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html for high-level documentation on this subsystem. """ stream = None message_content = message_content_raw.rstrip() if len(message_content) == 0: raise JsonableError(_("Message must not be empty")) if '\x00' in message_content: raise JsonableError(_("Message must not contain null bytes")) message_content = truncate_body(message_content) if realm is None: realm = sender.realm if addressee.is_stream(): topic_name = addressee.topic() topic_name = truncate_topic(topic_name) stream_name = addressee.stream_name() stream_id = addressee.stream_id() if stream_name is not None: stream = validate_stream_name_with_pm_notification(stream_name, realm, sender) elif stream_id is not None: stream = validate_stream_id_with_pm_notification(stream_id, realm, sender) else: stream = addressee.stream() assert stream is not None recipient = stream.recipient # This will raise JsonableError if there are problems. if sender.bot_type != sender.OUTGOING_WEBHOOK_BOT: access_stream_for_send_message( sender=sender, stream=stream, forwarder_user_profile=forwarder_user_profile) elif addressee.is_private(): user_profiles = addressee.user_profiles() mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"] check_private_message_policy(realm, sender, user_profiles) # API Super-users who set the `forged` flag are allowed to # forge messages sent by any user, so we disable the # `forwarded_mirror_message` security check in that case. forwarded_mirror_message = mirror_message and not forged try: recipient = recipient_for_user_profiles(user_profiles, forwarded_mirror_message, forwarder_user_profile, sender) except ValidationError as e: assert isinstance(e.messages[0], str) raise JsonableError(e.messages[0]) else: # This is defensive code--Addressee already validates # the message type. raise AssertionError("Invalid message type") message = Message() message.sender = sender message.content = message_content message.recipient = recipient if addressee.is_stream(): message.set_topic_name(topic_name) if forged and forged_timestamp is not None: # Forged messages come with a timestamp message.date_sent = timestamp_to_datetime(forged_timestamp) else: message.date_sent = timezone_now() message.sending_client = client # We render messages later in the process. 
assert message.rendered_content is None if client.name == "zephyr_mirror": id = already_sent_mirrored_message_id(message) if id is not None: return {'message': id} if widget_content is not None: try: widget_content = ujson.loads(widget_content) except Exception: raise JsonableError(_('Widgets: API programmer sent invalid JSON content')) try: check_widget_content(widget_content) except ValidationError as error: raise JsonableError(_('Widgets: {error_msg}').format( error_msg=error.message, )) return {'message': message, 'stream': stream, 'local_id': local_id, 'sender_queue_id': sender_queue_id, 'realm': realm, 'widget_content': widget_content} def _internal_prep_message(realm: Realm, sender: UserProfile, addressee: Addressee, content: str) -> Optional[Dict[str, Any]]: """ Create a message object and checks it, but doesn't send it or save it to the database. The internal function that calls this can therefore batch send a bunch of created messages together as one database query. Call do_send_messages with a list of the return values of this method. """ # Remove any null bytes from the content if len(content) > MAX_MESSAGE_LENGTH: content = content[0:3900] + "\n\n[message was too long and has been truncated]" # If we have a stream name, and the stream doesn't exist, we # create it here (though this code path should probably be removed # eventually, moving that responsibility to the caller). If # addressee.stream_name() is None (i.e. we're sending to a stream # by ID), we skip this, as the stream object must already exist. if addressee.is_stream(): stream_name = addressee.stream_name() if stream_name is not None: ensure_stream(realm, stream_name, acting_user=sender) try: return check_message(sender, get_client("Internal"), addressee, content, realm=realm) except JsonableError as e: logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg) return None def internal_prep_stream_message( realm: Realm, sender: UserProfile, stream: Stream, topic: str, content: str, ) -> Optional[Dict[str, Any]]: """ See _internal_prep_message for details of how this works. """ addressee = Addressee.for_stream(stream, topic) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_prep_stream_message_by_name( realm: Realm, sender: UserProfile, stream_name: str, topic: str, content: str, ) -> Optional[Dict[str, Any]]: """ See _internal_prep_message for details of how this works. """ addressee = Addressee.for_stream_name(stream_name, topic) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_prep_private_message(realm: Realm, sender: UserProfile, recipient_user: UserProfile, content: str) -> Optional[Dict[str, Any]]: """ See _internal_prep_message for details of how this works. 
""" addressee = Addressee.for_user_profile(recipient_user) return _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) def internal_send_private_message(realm: Realm, sender: UserProfile, recipient_user: UserProfile, content: str) -> Optional[int]: message = internal_prep_private_message(realm, sender, recipient_user, content) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def internal_send_stream_message( realm: Realm, sender: UserProfile, stream: Stream, topic: str, content: str, email_gateway: bool=False) -> Optional[int]: message = internal_prep_stream_message( realm, sender, stream, topic, content, ) if message is None: return None message_ids = do_send_messages([message], email_gateway=email_gateway) return message_ids[0] def internal_send_stream_message_by_name( realm: Realm, sender: UserProfile, stream_name: str, topic: str, content: str, ) -> Optional[int]: message = internal_prep_stream_message_by_name( realm, sender, stream_name, topic, content, ) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str], content: str) -> Optional[int]: addressee = Addressee.for_private(emails, realm) message = _internal_prep_message( realm=realm, sender=sender, addressee=addressee, content=content, ) if message is None: return None message_ids = do_send_messages([message]) return message_ids[0] def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str: # These colors are shared with the palette in subs.js. used_colors = [sub.color for sub in subs if sub.active] available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors] if available_colors: return available_colors[0] else: return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)] def validate_user_access_to_subscribers(user_profile: Optional[UserProfile], stream: Stream) -> None: """ Validates whether the user can view the subscribers of a stream. Raises a JsonableError if: * The user and the stream are in different realms * The realm is MIT and the stream is not invite only. * The stream is invite only, requesting_user is passed, and that user does not subscribe to the stream. """ validate_user_access_to_subscribers_helper( user_profile, {"realm_id": stream.realm_id, "invite_only": stream.invite_only}, # We use a lambda here so that we only compute whether the # user is subscribed if we have to lambda user_profile: subscribed_to_stream(user_profile, stream.id)) def validate_user_access_to_subscribers_helper( user_profile: Optional[UserProfile], stream_dict: Mapping[str, Any], check_user_subscribed: Callable[[UserProfile], bool], ) -> None: """Helper for validate_user_access_to_subscribers that doesn't require a full stream object. This function is a bit hard to read, because it is carefully optimized for performance in the two code paths we call it from: * In `bulk_get_subscriber_user_ids`, we already know whether the user was subscribed via `sub_dict`, and so we want to avoid a database query at all (especially since it calls this in a loop); * In `validate_user_access_to_subscribers`, we want to only check if the user is subscribed when we absolutely have to, since it costs a database query. The `check_user_subscribed` argument is a function that reports whether the user is subscribed to the stream. 
Note also that we raise a ValidationError in cases where the caller is doing the wrong thing (maybe these should be AssertionErrors), and JsonableError for 400 type errors. """ if user_profile is None: raise ValidationError("Missing user to validate access for") if user_profile.realm_id != stream_dict["realm_id"]: raise ValidationError("Requesting user not in given realm") # Guest users can access subscribed public stream's subscribers if user_profile.is_guest: if check_user_subscribed(user_profile): return # We could put an AssertionError here; in that we don't have # any code paths that would allow a guest user to access other # streams in the first place. if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]: raise JsonableError(_("Subscriber data is not available for this stream")) # Organization administrators can view subscribers for all streams. if user_profile.is_realm_admin: return if (stream_dict["invite_only"] and not check_user_subscribed(user_profile)): raise JsonableError(_("Unable to retrieve subscribers for private stream")) def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]], user_profile: UserProfile, sub_dict: Mapping[int, bool], stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]: """sub_dict maps stream_id => whether the user is subscribed to that stream.""" target_stream_dicts = [] for stream_dict in stream_dicts: stream_recipient.populate_with(stream_id=stream_dict["id"], recipient_id=stream_dict["recipient_id"]) try: validate_user_access_to_subscribers_helper( user_profile, stream_dict, lambda user_profile: sub_dict[stream_dict["id"]], ) except JsonableError: continue target_stream_dicts.append(stream_dict) stream_ids = [stream['id'] for stream in target_stream_dicts] recipient_ids = sorted([ stream_recipient.recipient_id_for(stream_id) for stream_id in stream_ids ]) result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts} if not recipient_ids: return result ''' The raw SQL below leads to more than a 2x speedup when tested with 20k+ total subscribers. (For large realms with lots of default streams, this function deals with LOTS of data, so it is important to optimize.) ''' query = SQL(''' SELECT zerver_subscription.recipient_id, zerver_subscription.user_profile_id FROM zerver_subscription INNER JOIN zerver_userprofile ON zerver_userprofile.id = zerver_subscription.user_profile_id WHERE zerver_subscription.recipient_id in %(recipient_ids)s AND zerver_subscription.active AND zerver_userprofile.is_active ORDER BY zerver_subscription.recipient_id, zerver_subscription.user_profile_id ''') cursor = connection.cursor() cursor.execute(query, {"recipient_ids": tuple(recipient_ids)}) rows = cursor.fetchall() cursor.close() recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict() ''' Using groupby/itemgetter here is important for performance, at scale. It makes it so that all interpreter overhead is just O(N) in nature. ''' for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)): user_profile_ids = [r[1] for r in recip_rows] stream_id = recip_to_stream_id[recip_id] result[stream_id] = list(user_profile_ids) return result def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet: # TODO: Make a generic stub for QuerySet """ Build a query to get the subscribers list for a stream, raising a JsonableError if: 'realm' is optional in stream. The caller can refine this query with select_related(), values(), etc. 
depending on whether it wants objects or just certain fields """ validate_user_access_to_subscribers(requesting_user, stream) # Note that non-active users may still have "active" subscriptions, because we # want to be able to easily reactivate them with their old subscriptions. This # is why the query here has to look at the UserProfile.is_active flag. subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter( user_profile__is_active=True, ) return subscriptions def get_subscriber_emails(stream: Stream, requesting_user: Optional[UserProfile]=None) -> List[str]: subscriptions_query = get_subscribers_query(stream, requesting_user) subscriptions = subscriptions_query.values('user_profile__email') return [subscription['user_profile__email'] for subscription in subscriptions] def notify_subscriptions_added(user_profile: UserProfile, sub_pairs: Iterable[Tuple[Subscription, Stream]], stream_user_ids: Callable[[Stream], List[int]], recent_traffic: Dict[int, int], no_log: bool=False) -> None: if not no_log: log_event({'type': 'subscription_added', 'user': user_profile.email, 'names': [stream.name for sub, stream in sub_pairs], 'realm': user_profile.realm.string_id}) sub_dicts = [] for (subscription, stream) in sub_pairs: sub_dict = stream.to_dict() for field_name in Subscription.API_FIELDS: if field_name == "active": # Skip the "active" field, it's implied by context continue sub_dict[field_name] = getattr(subscription, field_name) sub_dict['in_home_view'] = not subscription.is_muted sub_dict['email_address'] = encode_email_address(stream, show_sender=True) sub_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream.id, stream.date_created, recent_traffic) sub_dict['subscribers'] = stream_user_ids(stream) sub_dicts.append(sub_dict) # Send a notification to the user who subscribed. event = dict(type="subscription", op="add", subscriptions=sub_dicts) send_event(user_profile.realm, event, [user_profile.id]) def get_peer_user_ids_for_stream_change(stream: Stream, altered_user_ids: Iterable[int], subscribed_user_ids: Iterable[int]) -> Set[int]: ''' altered_user_ids is the user_ids that we are adding/removing subscribed_user_ids is the already-subscribed user_ids Based on stream policy, we notify the correct bystanders, while not notifying altered_users (who get subscribers via another event) ''' if stream.invite_only: # PRIVATE STREAMS # Realm admins can access all private stream subscribers. Send them an # event even if they aren't subscribed to stream. realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()] user_ids_to_notify = [] user_ids_to_notify.extend(realm_admin_ids) user_ids_to_notify.extend(subscribed_user_ids) return set(user_ids_to_notify) - set(altered_user_ids) else: # PUBLIC STREAMS # We now do "peer_add" or "peer_remove" events even for streams # users were never subscribed to, in order for the neversubscribed # structure to stay up-to-date. 
return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids) def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]: stream_ids = [stream.id for stream in streams] all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter( user_profile__is_active=True, ).values( 'recipient__type_id', 'user_profile_id', ).order_by( 'recipient__type_id', ) get_stream_id = itemgetter('recipient__type_id') all_subscribers_by_stream: Dict[int, List[int]] = defaultdict(list) for stream_id, rows in itertools.groupby(all_subs, get_stream_id): user_ids = [row['user_profile_id'] for row in rows] all_subscribers_by_stream[stream_id] = user_ids return all_subscribers_by_stream def get_last_message_id() -> int: # We generally use this function to populate RealmAuditLog, and # the max id here is actually systemwide, not per-realm. I # assume there's some advantage in not filtering by realm. last_id = Message.objects.aggregate(Max('id'))['id__max'] if last_id is None: # During initial realm creation, there might be 0 messages in # the database; in that case, the `aggregate` query returns # None. Since we want an int for "beginning of time", use -1. last_id = -1 return last_id SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]] def bulk_add_subscriptions(streams: Iterable[Stream], users: Iterable[UserProfile], color_map: Mapping[str, str]={}, from_stream_creation: bool=False, acting_user: Optional[UserProfile]=None) -> SubT: users = list(users) recipients_map: Dict[int, int] = {stream.id: stream.recipient_id for stream in streams} recipient_ids: List[int] = [recipient_id for recipient_id in recipients_map.values()] stream_map: Dict[int, Stream] = {} for stream in streams: stream_map[recipients_map[stream.id]] = stream subs_by_user: Dict[int, List[Subscription]] = defaultdict(list) all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile') for sub in all_subs_query: subs_by_user[sub.user_profile_id].append(sub) realm = users[0].realm already_subscribed: List[Tuple[UserProfile, Stream]] = [] subs_to_activate: List[Tuple[Subscription, Stream]] = [] new_subs: List[Tuple[UserProfile, int, Stream]] = [] for user_profile in users: needs_new_sub: Set[int] = set(recipient_ids) for sub in subs_by_user[user_profile.id]: if sub.recipient_id in needs_new_sub: needs_new_sub.remove(sub.recipient_id) if sub.active: already_subscribed.append((user_profile, stream_map[sub.recipient_id])) else: subs_to_activate.append((sub, stream_map[sub.recipient_id])) # Mark the sub as active, without saving, so that # pick_color will consider this to be an active # subscription when picking colors sub.active = True for recipient_id in needs_new_sub: new_subs.append((user_profile, recipient_id, stream_map[recipient_id])) subs_to_add: List[Tuple[Subscription, Stream]] = [] for (user_profile, recipient_id, stream) in new_subs: if stream.name in color_map: color = color_map[stream.name] else: color = pick_color(user_profile, subs_by_user[user_profile.id]) sub_to_add = Subscription(user_profile=user_profile, active=True, color=color, recipient_id=recipient_id) subs_by_user[user_profile.id].append(sub_to_add) subs_to_add.append((sub_to_add, stream)) # TODO: XXX: This transaction really needs to be done at the serializeable # transaction isolation level. 
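# --- Hedged illustration (not part of the original module) of the peer
# notification audience computed by get_peer_user_ids_for_stream_change()
# above: for a public stream every active non-guest user except the altered
# users is notified; for a private stream only realm admins plus existing
# subscribers are. `stream`, `new_user_id`, and `current_subscriber_ids` are
# hypothetical placeholders.
def _example_peer_audience(stream: Stream, new_user_id: int,
                           current_subscriber_ids: Set[int]) -> Set[int]:
    return get_peer_user_ids_for_stream_change(
        stream=stream,
        altered_user_ids=[new_user_id],
        subscribed_user_ids=current_subscriber_ids,
    )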
with transaction.atomic(): occupied_streams_before = list(get_occupied_streams(realm)) Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add]) sub_ids = [sub.id for (sub, stream) in subs_to_activate] Subscription.objects.filter(id__in=sub_ids).update(active=True) occupied_streams_after = list(get_occupied_streams(realm)) # Log Subscription Activities in RealmAuditLog event_time = timezone_now() event_last_message_id = get_last_message_id() all_subscription_logs: (List[RealmAuditLog]) = [] for (sub, stream) in subs_to_add: all_subscription_logs.append(RealmAuditLog(realm=realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_CREATED, event_time=event_time)) for (sub, stream) in subs_to_activate: all_subscription_logs.append(RealmAuditLog(realm=realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED, event_time=event_time)) # Now since we have all log objects generated we can do a bulk insert RealmAuditLog.objects.bulk_create(all_subscription_logs) new_occupied_streams = [stream for stream in set(occupied_streams_after) - set(occupied_streams_before) if not stream.invite_only] if new_occupied_streams and not from_stream_creation: event: Dict[str, object] = dict( type="stream", op="occupy", streams=[stream.to_dict() for stream in new_occupied_streams], ) send_event(realm, event, active_user_ids(realm.id)) # Notify all existing users on streams that users have joined # First, get all users subscribed to the streams that we care about # We fetch all subscription information upfront, as it's used throughout # the following code and we want to minize DB queries all_subscribers_by_stream = get_user_ids_for_streams(streams=streams) def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]: if stream.is_in_zephyr_realm and not stream.invite_only: return [] user_ids = all_subscribers_by_stream[stream.id] return user_ids sub_tuples_by_user: Dict[int, List[Tuple[Subscription, Stream]]] = defaultdict(list) new_streams: Set[Tuple[int, int]] = set() for (sub, stream) in subs_to_add + subs_to_activate: sub_tuples_by_user[sub.user_profile.id].append((sub, stream)) new_streams.add((sub.user_profile.id, stream.id)) # We now send several types of events to notify browsers. The # first batch is notifications to users on invite-only streams # that the stream exists. for stream in streams: if not stream.is_public(): # Users newly added to invite-only streams # need a `create` notification. The former, because # they need the stream to exist before # they get the "subscribe" notification, and the latter so # they can manage the new stream. # Realm admins already have all created private streams. realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()] new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and user.id not in realm_admin_ids] send_stream_creation_event(stream, new_users_ids) stream_ids = {stream.id for stream in streams} recent_traffic = get_streams_traffic(stream_ids=stream_ids) # The second batch is events for the users themselves that they # were subscribed to the new streams. 
for user_profile in users: if len(sub_tuples_by_user[user_profile.id]) == 0: continue sub_pairs = sub_tuples_by_user[user_profile.id] notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids, recent_traffic) # The second batch is events for other users who are tracking the # subscribers lists of streams in their browser; everyone for # public streams and only existing subscribers for private streams. for stream in streams: if stream.is_in_zephyr_realm and not stream.invite_only: continue new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams] subscribed_user_ids = all_subscribers_by_stream[stream.id] peer_user_ids = get_peer_user_ids_for_stream_change( stream=stream, altered_user_ids=new_user_ids, subscribed_user_ids=subscribed_user_ids, ) if peer_user_ids: for new_user_id in new_user_ids: event = dict(type="subscription", op="peer_add", stream_id=stream.id, user_id=new_user_id) send_event(realm, event, peer_user_ids) return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] + [(sub.user_profile, stream) for (sub, stream) in subs_to_activate], already_subscribed) def get_available_notification_sounds() -> List[str]: notification_sounds_path = static_path('audio/notification_sounds') available_notification_sounds = [] for file_name in os.listdir(notification_sounds_path): root, ext = os.path.splitext(file_name) if '.' in root: # nocoverage # Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming) # to avoid spurious duplicates. continue if ext == '.ogg': available_notification_sounds.append(root) return available_notification_sounds def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream], no_log: bool=False) -> None: if not no_log: log_event({'type': 'subscription_removed', 'user': user_profile.email, 'names': [stream.name for stream in streams], 'realm': user_profile.realm.string_id}) payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams] event = dict(type="subscription", op="remove", subscriptions=payload) send_event(user_profile.realm, event, [user_profile.id]) SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]] def bulk_remove_subscriptions(users: Iterable[UserProfile], streams: Iterable[Stream], acting_client: Client, acting_user: Optional[UserProfile]=None) -> SubAndRemovedT: users = list(users) streams = list(streams) stream_dict = {stream.id: stream for stream in streams} existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict) def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]: stream_ids = {stream.id for stream in streams} not_subscribed: List[Tuple[UserProfile, Stream]] = [] for user_profile in users: user_sub_stream_info = existing_subs_by_user[user_profile.id] subscribed_stream_ids = { stream.id for (sub, stream) in user_sub_stream_info } not_subscribed_stream_ids = stream_ids - subscribed_stream_ids for stream_id in not_subscribed_stream_ids: stream = stream_dict[stream_id] not_subscribed.append((user_profile, stream)) return not_subscribed not_subscribed = get_non_subscribed_tups() subs_to_deactivate: List[Tuple[Subscription, Stream]] = [] sub_ids_to_deactivate: List[int] = [] # This loop just flattens out our data into big lists for # bulk operations. 
for tup_list in existing_subs_by_user.values(): for (sub, stream) in tup_list: subs_to_deactivate.append((sub, stream)) sub_ids_to_deactivate.append(sub.id) our_realm = users[0].realm # TODO: XXX: This transaction really needs to be done at the serializeable # transaction isolation level. with transaction.atomic(): occupied_streams_before = list(get_occupied_streams(our_realm)) Subscription.objects.filter( id__in=sub_ids_to_deactivate, ) .update(active=False) occupied_streams_after = list(get_occupied_streams(our_realm)) # Log Subscription Activities in RealmAuditLog event_time = timezone_now() event_last_message_id = get_last_message_id() all_subscription_logs: (List[RealmAuditLog]) = [] for (sub, stream) in subs_to_deactivate: all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm, acting_user=acting_user, modified_user=sub.user_profile, modified_stream=stream, event_last_message_id=event_last_message_id, event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED, event_time=event_time)) # Now since we have all log objects generated we can do a bulk insert RealmAuditLog.objects.bulk_create(all_subscription_logs) altered_user_dict: Dict[int, List[UserProfile]] = defaultdict(list) streams_by_user: Dict[int, List[Stream]] = defaultdict(list) for (sub, stream) in subs_to_deactivate: streams_by_user[sub.user_profile_id].append(stream) altered_user_dict[stream.id].append(sub.user_profile) for user_profile in users: if len(streams_by_user[user_profile.id]) == 0: continue notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id]) event = {'type': 'mark_stream_messages_as_read', 'client_id': acting_client.id, 'user_profile_id': user_profile.id, 'stream_ids': [stream.id for stream in streams]} queue_json_publish("deferred_work", event) all_subscribers_by_stream = get_user_ids_for_streams(streams=streams) def send_peer_remove_event(stream: Stream) -> None: if stream.is_in_zephyr_realm and not stream.invite_only: return altered_users = altered_user_dict[stream.id] altered_user_ids = [u.id for u in altered_users] subscribed_user_ids = all_subscribers_by_stream[stream.id] peer_user_ids = get_peer_user_ids_for_stream_change( stream=stream, altered_user_ids=altered_user_ids, subscribed_user_ids=subscribed_user_ids, ) if peer_user_ids: for removed_user in altered_users: event = dict(type="subscription", op="peer_remove", stream_id=stream.id, user_id=removed_user.id) send_event(our_realm, event, peer_user_ids) for stream in streams: send_peer_remove_event(stream=stream) new_vacant_streams = [stream for stream in set(occupied_streams_before) - set(occupied_streams_after)] new_vacant_private_streams = [stream for stream in new_vacant_streams if stream.invite_only] new_vacant_public_streams = [stream for stream in new_vacant_streams if not stream.invite_only] if new_vacant_public_streams: event = dict(type="stream", op="vacate", streams=[stream.to_dict() for stream in new_vacant_public_streams]) send_event(our_realm, event, active_user_ids(our_realm.id)) if new_vacant_private_streams: # Deactivate any newly-vacant private streams for stream in new_vacant_private_streams: do_deactivate_stream(stream, acting_user=acting_user) return ( [(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate], not_subscribed, ) def log_subscription_property_change(user_email: str, stream_name: str, property: str, value: Any) -> None: event = {'type': 'subscription_property', 'property': property, 'user': user_email, 'stream_name': stream_name, 'value': value} log_event(event) def 
do_change_subscription_property(user_profile: UserProfile, sub: Subscription, stream: Stream, property_name: str, value: Any, ) -> None: database_property_name = property_name event_property_name = property_name database_value = value event_value = value # For this property, is_muted is used in the database, but # in_home_view in the API, since we haven't migrated the events # API to the new name yet. if property_name == "in_home_view": database_property_name = "is_muted" database_value = not value if property_name == "is_muted": event_property_name = "in_home_view" event_value = not value setattr(sub, database_property_name, database_value) sub.save(update_fields=[database_property_name]) log_subscription_property_change(user_profile.email, stream.name, database_property_name, database_value) event = dict(type="subscription", op="update", email=user_profile.email, property=event_property_name, value=event_value, stream_id=stream.id, name=stream.name) send_event(user_profile.realm, event, [user_profile.id]) def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None: user_profile.set_password(password) if commit: user_profile.save(update_fields=["password"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED, event_time=event_time) def do_change_full_name(user_profile: UserProfile, full_name: str, acting_user: Optional[UserProfile]) -> None: old_name = user_profile.full_name user_profile.full_name = full_name user_profile.save(update_fields=["full_name"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED, event_time=event_time, extra_data=old_name) payload = dict(user_id=user_profile.id, full_name=user_profile.full_name) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=payload), bot_owner_user_ids(user_profile)) def check_change_full_name(user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile) -> str: """Verifies that the user's proposed full name is valid. The caller is responsible for checking check permissions. Returns the new full name, which may differ from what was passed in (because this function strips whitespace).""" new_full_name = check_full_name(full_name_raw) do_change_full_name(user_profile, new_full_name, acting_user) return new_full_name def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile) -> None: new_full_name = check_full_name(full_name_raw) if new_full_name == user_profile.full_name: # Our web app will try to patch full_name even if the user didn't # modify the name in the form. We just silently ignore those # situations. return check_bot_name_available( realm_id=user_profile.realm_id, full_name=new_full_name, ) do_change_full_name(user_profile, new_full_name, acting_user) def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile, acting_user: UserProfile) -> None: previous_owner = user_profile.bot_owner user_profile.bot_owner = bot_owner user_profile.save() # Can't use update_fields because of how the foreign key works. 
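# --- Hedged sketch (illustrative only): the API-level "in_home_view" flag is
# stored inverted as Subscription.is_muted, and events keep reporting
# "in_home_view" for compatibility, as handled above. `user_profile`, `sub`,
# and `stream` are hypothetical placeholders for an existing subscription.
def _example_mute_stream(user_profile: UserProfile, sub: Subscription,
                         stream: Stream) -> None:
    # Setting in_home_view=False persists is_muted=True on the subscription ...
    do_change_subscription_property(user_profile, sub, stream, 'in_home_view', False)
    # ... while setting is_muted=False emits an in_home_view=True event.
    do_change_subscription_property(user_profile, sub, stream, 'is_muted', False)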
event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED, event_time=event_time) update_users = bot_owner_user_ids(user_profile) # For admins, update event is sent instead of delete/add # event. bot_data of admin contains all the # bots and none of them should be removed/(added again). # Delete the bot from previous owner's bot data. if previous_owner and not previous_owner.is_realm_admin: send_event(user_profile.realm, dict(type='realm_bot', op="delete", bot=dict( user_id=user_profile.id, )), {previous_owner.id}) # Do not send update event for previous bot owner. update_users = update_users - {previous_owner.id} # Notify the new owner that the bot has been added. if not bot_owner.is_realm_admin: add_event = created_bot_event(user_profile) send_event(user_profile.realm, add_event, {bot_owner.id}) # Do not send update event for bot_owner. update_users = update_users - {bot_owner.id} send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, owner_id=user_profile.bot_owner.id, )), update_users) # Since `bot_owner_id` is included in the user profile dict we need # to update the users dict with the new bot owner id event: Dict[str, Any] = dict( type="realm_user", op="update", person=dict( user_id=user_profile.id, bot_owner_id=user_profile.bot_owner.id, ), ) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None: user_profile.tos_version = tos_version user_profile.save(update_fields=["tos_version"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile, modified_user=user_profile, event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED, event_time=event_time) def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str: old_api_key = user_profile.api_key new_api_key = generate_api_key() user_profile.api_key = new_api_key user_profile.save(update_fields=["api_key"]) # We need to explicitly delete the old API key from our caches, # because the on-save handler for flushing the UserProfile object # in zerver/lib/cache.py only has access to the new API key. cache_delete(user_profile_by_api_key_cache_key(old_api_key)) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED, event_time=event_time) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, api_key=new_api_key, )), bot_owner_user_ids(user_profile)) event = {'type': 'clear_push_device_tokens', 'user_profile_id': user_profile.id} queue_json_publish("deferred_work", event) return new_api_key def notify_avatar_url_change(user_profile: UserProfile) -> None: if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, avatar_url=avatar_url(user_profile), )), bot_owner_user_ids(user_profile)) payload = dict( avatar_source=user_profile.avatar_source, avatar_url=avatar_url(user_profile), avatar_url_medium=avatar_url(user_profile, medium=True), avatar_version=user_profile.avatar_version, # Even clients using client_gravatar don't need the email, # since we're sending the URL anyway. 
user_id=user_profile.id, ) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str, skip_notify: bool=False, acting_user: Optional[UserProfile]=None) -> None: user_profile.avatar_source = avatar_source user_profile.avatar_version += 1 user_profile.save(update_fields=["avatar_source", "avatar_version"]) event_time = timezone_now() RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile, event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED, extra_data={'avatar_source': avatar_source}, event_time=event_time, acting_user=acting_user) if not skip_notify: notify_avatar_url_change(user_profile) def do_delete_avatar_image(user: UserProfile, acting_user: Optional[UserProfile]=None) -> None: do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user) delete_avatar_image(user) def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None: realm.icon_source = icon_source realm.icon_version += 1 realm.save(update_fields=["icon_source", "icon_version"]) if log: log_event({'type': 'realm_change_icon', 'realm': realm.string_id, 'icon_source': icon_source}) send_event(realm, dict(type='realm', op='update_dict', property="icon", data=dict(icon_source=realm.icon_source, icon_url=realm_icon_url(realm))), active_user_ids(realm.id)) def do_change_logo_source(realm: Realm, logo_source: str, night: bool, acting_user: Optional[UserProfile]=None) -> None: if not night: realm.logo_source = logo_source realm.logo_version += 1 realm.save(update_fields=["logo_source", "logo_version"]) else: realm.night_logo_source = logo_source realm.night_logo_version += 1 realm.save(update_fields=["night_logo_source", "night_logo_version"]) RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED, realm=realm, event_time=timezone_now(), acting_user=acting_user) event = dict(type='realm', op='update_dict', property="night_logo" if night else "logo", data=get_realm_logo_data(realm, night)) send_event(realm, event, active_user_ids(realm.id)) def do_change_plan_type(realm: Realm, plan_type: int) -> None: old_value = realm.plan_type realm.plan_type = plan_type realm.save(update_fields=['plan_type']) RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED, realm=realm, event_time=timezone_now(), extra_data={'old_value': old_value, 'new_value': plan_type}) if plan_type == Realm.STANDARD: realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX realm.message_visibility_limit = None realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD elif plan_type == Realm.SELF_HOSTED: realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter. 
realm.message_visibility_limit = None realm.upload_quota_gb = None elif plan_type == Realm.STANDARD_FREE: realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX realm.message_visibility_limit = None realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD elif plan_type == Realm.LIMITED: realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED else: raise AssertionError("Invalid plan type") update_first_visible_message_id(realm) realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb']) event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type, 'extra_data': {'upload_quota': realm.upload_quota_bytes()}} send_event(realm, event, active_user_ids(realm.id)) def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream], log: bool=True) -> None: user_profile.default_sending_stream = stream user_profile.save(update_fields=['default_sending_stream']) if log: log_event({'type': 'user_change_default_sending_stream', 'user': user_profile.email, 'stream': str(stream)}) if user_profile.is_bot: if stream: stream_name: Optional[str] = stream.name else: stream_name = None send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_sending_stream=stream_name, )), bot_owner_user_ids(user_profile)) def do_change_default_events_register_stream(user_profile: UserProfile, stream: Optional[Stream], log: bool=True) -> None: user_profile.default_events_register_stream = stream user_profile.save(update_fields=['default_events_register_stream']) if log: log_event({'type': 'user_change_default_events_register_stream', 'user': user_profile.email, 'stream': str(stream)}) if user_profile.is_bot: if stream: stream_name: Optional[str] = stream.name else: stream_name = None send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_events_register_stream=stream_name, )), bot_owner_user_ids(user_profile)) def do_change_default_all_public_streams(user_profile: UserProfile, value: bool, log: bool=True) -> None: user_profile.default_all_public_streams = value user_profile.save(update_fields=['default_all_public_streams']) if log: log_event({'type': 'user_change_default_all_public_streams', 'user': user_profile.email, 'value': str(value)}) if user_profile.is_bot: send_event(user_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=user_profile.id, default_all_public_streams=user_profile.default_all_public_streams, )), bot_owner_user_ids(user_profile)) def do_change_user_role(user_profile: UserProfile, value: int, acting_user: Optional[UserProfile]=None) -> None: old_value = user_profile.role user_profile.role = value user_profile.save(update_fields=["role"]) RealmAuditLog.objects.create( realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user, event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(), extra_data=ujson.dumps({ RealmAuditLog.OLD_VALUE: old_value, RealmAuditLog.NEW_VALUE: value, RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm), })) event = dict(type="realm_user", op="update", person=dict(user_id=user_profile.id, role=user_profile.role)) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def do_change_is_api_super_user(user_profile: UserProfile, value: bool) -> None: user_profile.is_api_super_user = value 
user_profile.save(update_fields=["is_api_super_user"]) def do_change_stream_invite_only(stream: Stream, invite_only: bool, history_public_to_subscribers: Optional[bool]=None) -> None: history_public_to_subscribers = get_default_value_for_history_public_to_subscribers( stream.realm, invite_only, history_public_to_subscribers, ) stream.invite_only = invite_only stream.history_public_to_subscribers = history_public_to_subscribers stream.save(update_fields=['invite_only', 'history_public_to_subscribers']) event = dict( op="update", type="stream", property="invite_only", value=invite_only, history_public_to_subscribers=history_public_to_subscribers, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None: stream.is_web_public = is_web_public stream.save(update_fields=['is_web_public']) def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None: stream.stream_post_policy = stream_post_policy stream.save(update_fields=['stream_post_policy']) event = dict( op="update", type="stream", property="stream_post_policy", value=stream_post_policy, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) # Backwards-compatibility code: We removed the # is_announcement_only property in early 2020, but we send a # duplicate event for legacy mobile clients that might want the # data. event = dict( op="update", type="stream", property="is_announcement_only", value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_rename_stream(stream: Stream, new_name: str, user_profile: UserProfile, log: bool=True) -> Dict[str, str]: old_name = stream.name stream.name = new_name stream.save(update_fields=["name"]) if log: log_event({'type': 'stream_name_change', 'realm': stream.realm.string_id, 'new_name': new_name}) recipient_id = stream.recipient_id messages = Message.objects.filter(recipient_id=recipient_id).only("id") # Update the display recipient and stream, which are easy single # items to set. old_cache_key = get_stream_cache_key(old_name, stream.realm_id) new_cache_key = get_stream_cache_key(stream.name, stream.realm_id) if old_cache_key != new_cache_key: cache_delete(old_cache_key) cache_set(new_cache_key, stream) cache_set(display_recipient_cache_key(recipient_id), stream.name) # Delete cache entries for everything else, which is cheaper and # clearer than trying to set them. display_recipient is the out of # date field in all cases. cache_delete_many( to_dict_cache_key_id(message.id) for message in messages) new_email = encode_email_address(stream, show_sender=True) # We will tell our users to essentially # update stream.name = new_name where name = old_name # and update stream.email = new_email where name = old_name. # We could optimize this by trying to send one message, but the # client code really wants one property update at a time, and # updating stream names is a pretty infrequent operation. # More importantly, we want to key these updates by id, not name, # since id is the immutable primary key, and obviously name is not. 
data_updates = [ ['email_address', new_email], ['name', new_name], ] for property, value in data_updates: event = dict( op="update", type="stream", property=property, value=value, stream_id=stream.id, name=old_name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) sender = get_system_bot(settings.NOTIFICATION_BOT) with override_language(stream.realm.default_language): internal_send_stream_message( stream.realm, sender, stream, Realm.STREAM_EVENTS_NOTIFICATION_TOPIC, _('{user_name} renamed stream {old_stream_name} to {new_stream_name}.').format( user_name=f"@_**{user_profile.full_name}|{user_profile.id}**", old_stream_name=f"**{old_name}**", new_stream_name=f"**{new_name}**", ), ) # Even though the token doesn't change, the web client needs to update the # email forwarding address to display the correctly-escaped new name. return {"email_address": new_email} def do_change_stream_description(stream: Stream, new_description: str) -> None: stream.description = new_description stream.rendered_description = render_stream_description(new_description) stream.save(update_fields=['description', 'rendered_description']) event = dict( type='stream', op='update', property='description', name=stream.name, stream_id=stream.id, value=new_description, rendered_description=stream.rendered_description, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_change_stream_message_retention_days(stream: Stream, message_retention_days: Optional[int]=None) -> None: stream.message_retention_days = message_retention_days stream.save(update_fields=['message_retention_days']) event = dict( op="update", type="stream", property="message_retention_days", value=message_retention_days, stream_id=stream.id, name=stream.name, ) send_event(stream.realm, event, can_access_stream_user_ids(stream)) def do_create_realm(string_id: str, name: str, emails_restricted_to_domains: Optional[bool]=None) -> Realm: if Realm.objects.filter(string_id=string_id).exists(): raise AssertionError(f"Realm {string_id} already exists!") if not server_initialized(): logging.info("Server not yet initialized. Creating the internal realm first.") create_internal_realm() kwargs: Dict[str, Any] = {} if emails_restricted_to_domains is not None: kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains realm = Realm(string_id=string_id, name=name, **kwargs) realm.save() # Create stream once Realm object has been saved notifications_stream = ensure_stream( realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME, stream_description="Everyone is added to this stream by default. Welcome! :octopus:", acting_user=None) realm.notifications_stream = notifications_stream # With the current initial streams situation, the only public # stream is the notifications_stream. 
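# --- Illustrative sketch (not part of the original code) tying together a few
# of the per-stream setters defined above. `stream` is a hypothetical existing
# Stream; each helper saves the model and broadcasts a type="stream" update
# event to the users who can access the stream.
def _example_update_stream_settings(stream: Stream) -> None:
    do_change_stream_post_policy(stream, Stream.STREAM_POST_POLICY_ADMINS)
    do_change_stream_description(stream, 'Announcements from the admins.')
    do_change_stream_message_retention_days(stream, 30)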
DefaultStream.objects.create(stream=notifications_stream, realm=realm) signup_notifications_stream = ensure_stream( realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True, stream_description="A private stream for core team members.", acting_user=None) realm.signup_notifications_stream = signup_notifications_stream realm.save(update_fields=['notifications_stream', 'signup_notifications_stream']) if settings.BILLING_ENABLED: do_change_plan_type(realm, Realm.LIMITED) # Log the event log_event({"type": "realm_created", "string_id": string_id, "emails_restricted_to_domains": emails_restricted_to_domains}) sender = get_system_bot(settings.NOTIFICATION_BOT) admin_realm = sender.realm # Send a notification to the admin realm with override_language(admin_realm.default_language): signup_message = _("Signups enabled") try: signups_stream = get_signups_stream(admin_realm) topic = realm.display_subdomain internal_send_stream_message( admin_realm, sender, signups_stream, topic, signup_message, ) except Stream.DoesNotExist: # nocoverage # If the signups stream hasn't been created in the admin # realm, don't auto-create it to send to it; just do nothing. pass return realm def do_change_notification_settings(user_profile: UserProfile, name: str, value: Union[bool, int, str], log: bool=True) -> None: """Takes in a UserProfile object, the name of a global notification preference to update, and the value to update to """ notification_setting_type = UserProfile.notification_setting_types[name] assert isinstance(value, notification_setting_type), ( f'Cannot update {name}: {value} is not an instance of {notification_setting_type}') setattr(user_profile, name, value) # Disabling digest emails should clear a user's email queue if name == 'enable_digest_emails' and not value: clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST) user_profile.save(update_fields=[name]) event = {'type': 'update_global_notifications', 'user': user_profile.email, 'notification_name': name, 'setting': value} if log: log_event(event) send_event(user_profile.realm, event, [user_profile.id]) def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None: user_profile.enter_sends = enter_sends user_profile.save(update_fields=["enter_sends"]) def do_set_user_display_setting(user_profile: UserProfile, setting_name: str, setting_value: Union[bool, str, int]) -> None: property_type = UserProfile.property_types[setting_name] assert isinstance(setting_value, property_type) setattr(user_profile, setting_name, setting_value) user_profile.save(update_fields=[setting_name]) event = {'type': 'update_display_settings', 'user': user_profile.email, 'setting_name': setting_name, 'setting': setting_value} if setting_name == "default_language": assert isinstance(setting_value, str) event['language_name'] = get_language_name(setting_value) send_event(user_profile.realm, event, [user_profile.id]) # Updates to the timezone display setting are sent to all users if setting_name == "timezone": payload = dict(email=user_profile.email, user_id=user_profile.id, timezone=user_profile.timezone) send_event(user_profile.realm, dict(type='realm_user', op='update', person=payload), active_user_ids(user_profile.realm_id)) def lookup_default_stream_groups(default_stream_group_names: List[str], realm: Realm) -> List[DefaultStreamGroup]: default_stream_groups = [] for group_name in default_stream_group_names: try: default_stream_group = DefaultStreamGroup.objects.get( name=group_name, realm=realm) except DefaultStreamGroup.DoesNotExist: raise 
JsonableError(_('Invalid default stream group {}').format(group_name)) default_stream_groups.append(default_stream_group) return default_stream_groups def notify_default_streams(realm: Realm) -> None: event = dict( type="default_streams", default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)), ) send_event(realm, event, active_non_guest_user_ids(realm.id)) def notify_default_stream_groups(realm: Realm) -> None: event = dict( type="default_stream_groups", default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm)), ) send_event(realm, event, active_non_guest_user_ids(realm.id)) def do_add_default_stream(stream: Stream) -> None: realm_id = stream.realm_id stream_id = stream.id if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists(): DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id) notify_default_streams(stream.realm) def do_remove_default_stream(stream: Stream) -> None: realm_id = stream.realm_id stream_id = stream.id DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete() notify_default_streams(stream.realm) def do_create_default_stream_group(realm: Realm, group_name: str, description: str, streams: List[Stream]) -> None: default_streams = get_default_streams_for_realm(realm.id) for stream in streams: if stream in default_streams: raise JsonableError(_( "'{stream_name}' is a default stream and cannot be added to '{group_name}'", ).format(stream_name=stream.name, group_name=group_name)) check_default_stream_group_name(group_name) (group, created) = DefaultStreamGroup.objects.get_or_create( name=group_name, realm=realm, description=description) if not created: raise JsonableError(_( "Default stream group '{group_name}' already exists", ).format(group_name=group_name)) group.streams.set(streams) notify_default_stream_groups(realm) def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup, streams: List[Stream]) -> None: default_streams = get_default_streams_for_realm(realm.id) for stream in streams: if stream in default_streams: raise JsonableError(_( "'{stream_name}' is a default stream and cannot be added to '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) if stream in group.streams.all(): raise JsonableError(_( "Stream '{stream_name}' is already present in default stream group '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) group.streams.add(stream) group.save() notify_default_stream_groups(realm) def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup, streams: List[Stream]) -> None: for stream in streams: if stream not in group.streams.all(): raise JsonableError(_( "Stream '{stream_name}' is not present in default stream group '{group_name}'", ).format(stream_name=stream.name, group_name=group.name)) group.streams.remove(stream) group.save() notify_default_stream_groups(realm) def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup, new_group_name: str) -> None: if group.name == new_group_name: raise JsonableError(_("This default stream group is already named '{}'").format(new_group_name)) if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists(): raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name)) group.name = new_group_name group.save() notify_default_stream_groups(realm) def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup, 
new_description: str) -> None: group.description = new_description group.save() notify_default_stream_groups(realm) def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None: group.delete() notify_default_stream_groups(realm) def get_default_streams_for_realm(realm_id: int) -> List[Stream]: return [default.stream for default in DefaultStream.objects.select_related().filter(realm_id=realm_id)] def get_default_subs(user_profile: UserProfile) -> List[Stream]: # Right now default streams are realm-wide. This wrapper gives us flexibility # to some day further customize how we set up default streams for new users. return get_default_streams_for_realm(user_profile.realm_id) # returns default streams in json serializable format def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]: return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"]) def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]: return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"]) def do_update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None: effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH # This code isn't perfect, because with various races we might end # up creating two overlapping intervals, but that shouldn't happen # often, and can be corrected for in post-processing try: last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0] # There are two ways our intervals could overlap: # (1) The start of the new interval could be inside the old interval # (2) The end of the new interval could be inside the old interval # In either case, we just extend the old interval to include the new interval. if ((log_time <= last.end and log_time >= last.start) or (effective_end <= last.end and effective_end >= last.start)): last.end = max(last.end, effective_end) last.start = min(last.start, log_time) last.save(update_fields=["start", "end"]) return except IndexError: pass # Otherwise, the intervals don't overlap, so we should make a new one UserActivityInterval.objects.create(user_profile=user_profile, start=log_time, end=effective_end) @statsd_increment('user_activity') def do_update_user_activity(user_profile_id: int, client_id: int, query: str, count: int, log_time: datetime.datetime) -> None: (activity, created) = UserActivity.objects.get_or_create( user_profile_id = user_profile_id, client_id = client_id, query = query, defaults={'last_visit': log_time, 'count': count}) if not created: activity.count += count activity.last_visit = log_time activity.save(update_fields=["last_visit", "count"]) def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None: presence_dict = presence.to_dict() event = dict(type="presence", email=user_profile.email, user_id=user_profile.id, server_timestamp=time.time(), presence={presence_dict['client']: presence_dict}) send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id)) def consolidate_client(client: Client) -> Client: # The web app reports a client as 'website' # The desktop app reports a client as ZulipDesktop # due to it setting a custom user agent.
We want both # to count as web users # Alias ZulipDesktop to website if client.name in ['ZulipDesktop']: return get_client('website') else: return client @statsd_increment('user_presence') def do_update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int) -> None: client = consolidate_client(client) defaults = dict( timestamp=log_time, status=status, realm_id=user_profile.realm_id, ) (presence, created) = UserPresence.objects.get_or_create( user_profile = user_profile, client = client, defaults = defaults, ) stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10) was_idle = presence.status == UserPresence.IDLE became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle) # If an object was created, it has already been saved. # # We suppress changes from ACTIVE to IDLE before stale_status is reached; # this protects us from the user having two clients open: one active, the # other idle. Without this check, we would constantly toggle their status # between the two states. if not created and stale_status or was_idle or status == presence.status: # The following block attempts to only update the "status" # field in the event that it actually changed. This is # important to avoid flushing the UserPresence cache when the # data it would return to a client hasn't actually changed # (see the UserPresence post_save hook for details). presence.timestamp = log_time update_fields = ["timestamp"] if presence.status != status: presence.status = status update_fields.append("status") presence.save(update_fields=update_fields) if not user_profile.realm.presence_disabled and (created or became_online): # Push event to all users in the realm so they see the new user # appear in the presence list immediately, or the newly online # user without delay. Note that we won't send an update here for a # timestamp update, because we rely on the browser to ping us every 50 # seconds for realm-wide status updates, and those updates should have # recent timestamps, which means the browser won't think active users # have gone idle. If we were more aggressive in this function about # sending timestamp updates, we could eliminate the ping responses, but # that's not a high priority for now, considering that most of our non-MIT # realms are pretty small. 
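# Worked example of the rules above, assuming the 70-second staleness window:
# a user has the web app (reporting ACTIVE) and the desktop app (reporting
# IDLE) open. If the desktop pings IDLE 30 seconds after the web app's ACTIVE
# write, the row is neither stale nor was_idle and the status differs, so the
# write is suppressed and the user stays ACTIVE. Once the last write is more
# than 70 seconds old, an IDLE ping is accepted, and a later ACTIVE ping sets
# became_online, triggering the presence event below.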
send_presence_changed(user_profile, presence) def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None: event = {'user_profile_id': user_profile.id, 'time': datetime_to_timestamp(log_time)} queue_json_publish("user_activity_interval", event) def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int, new_user_input: bool) -> None: event = {'user_profile_id': user_profile.id, 'status': status, 'time': datetime_to_timestamp(log_time), 'client': client.name} queue_json_publish("user_presence", event) if new_user_input: update_user_activity_interval(user_profile, log_time) def do_update_user_status(user_profile: UserProfile, away: Optional[bool], status_text: Optional[str], client_id: int) -> None: if away: status = UserStatus.AWAY else: status = UserStatus.NORMAL realm = user_profile.realm update_user_status( user_profile_id=user_profile.id, status=status, status_text=status_text, client_id=client_id, ) event = dict( type='user_status', user_id=user_profile.id, ) if away is not None: event['away'] = away if status_text is not None: event['status_text'] = status_text send_event(realm, event, active_user_ids(realm.id)) def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int: log_statsd_event('bankruptcy') # First, we clear mobile push notifications. This is safer in the # event that the below logic times out and we're killed. all_push_message_ids = UserMessage.objects.filter( user_profile=user_profile, ).extra( where=[UserMessage.where_active_push_notification()], ).values_list("message_id", flat=True)[0:10000] do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids) msgs = UserMessage.objects.filter( user_profile=user_profile, ).extra( where=[UserMessage.where_unread()], ) count = msgs.update( flags=F('flags').bitor(UserMessage.flags.read), ) event = dict( type='update_message_flags', operation='add', flag='read', messages=[], # we don't send messages, since the client reloads anyway all=True, ) event_time = timezone_now() send_event(user_profile.realm, event, [user_profile.id]) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count def do_mark_stream_messages_as_read(user_profile: UserProfile, client: Client, stream: Stream, topic_name: Optional[str]=None) -> int: log_statsd_event('mark_stream_as_read') msgs = UserMessage.objects.filter( user_profile=user_profile, ) recipient = stream.recipient msgs = msgs.filter(message__recipient=recipient) if topic_name: msgs = filter_by_topic_name_via_message( query=msgs, topic_name=topic_name, ) msgs = msgs.extra( where=[UserMessage.where_unread()], ) message_ids = list(msgs.values_list('message__id', flat=True)) count = msgs.update( flags=F('flags').bitor(UserMessage.flags.read), ) event = dict( type='update_message_flags', operation='add', flag='read', messages=message_ids, all=False, ) event_time = timezone_now() send_event(user_profile.realm, event, [user_profile.id]) do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count def 
do_update_mobile_push_notification(message: Message, prior_mention_user_ids: Set[int], stream_push_user_ids: Set[int]) -> None: # Called during the message edit code path to remove mobile push # notifications for users who are no longer mentioned following # the edit. See #15428 for details. # # A perfect implementation would also support updating the message # in a sent notification if a message was edited to mention a # group rather than a user (or vice versa), though it is likely # not worth the effort to do such a change. if not message.is_stream_message(): return remove_notify_users = prior_mention_user_ids - message.mentions_user_ids - stream_push_user_ids do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id]) def do_clear_mobile_push_notifications_for_ids(user_profile_ids: List[int], message_ids: List[int]) -> None: if len(message_ids) == 0: return # This function supports clearing notifications for several users # only for the message-edit use case where we'll have a single message_id. assert len(user_profile_ids) == 1 or len(message_ids) == 1 messages_by_user = defaultdict(list) notifications_to_update = list(UserMessage.objects.filter( message_id__in=message_ids, user_profile_id__in=user_profile_ids, ).extra( where=[UserMessage.where_active_push_notification()], ).values_list('user_profile_id', 'message_id')) for (user_id, message_id) in notifications_to_update: messages_by_user[user_id].append(message_id) for (user_profile_id, event_message_ids) in messages_by_user.items(): queue_json_publish("missedmessage_mobile_notifications", { "type": "remove", "user_profile_id": user_profile_id, "message_ids": event_message_ids, }) def do_update_message_flags(user_profile: UserProfile, client: Client, operation: str, flag: str, messages: List[int]) -> int: valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS] if flag not in valid_flags: raise JsonableError(_("Invalid flag: '{}'").format(flag)) if flag in UserMessage.NON_EDITABLE_FLAGS: raise JsonableError(_("Flag not editable: '{}'").format(flag)) flagattr = getattr(UserMessage.flags, flag) msgs = UserMessage.objects.filter(user_profile=user_profile, message__id__in=messages) # This next block allows you to star any message, even those you # didn't receive (e.g. because you're looking at a public stream # you're not subscribed to, etc.). The problem is that starring # is a flag boolean on UserMessage, and UserMessage rows are # normally created only when you receive a message to support # searching your personal history. So we need to create one. We # add UserMessage.flags.historical, so that features that need # "messages you actually received" can exclude these UserMessages. if msgs.count() == 0: if len(messages) != 1: raise JsonableError(_("Invalid message(s)")) if flag != "starred": raise JsonableError(_("Invalid message(s)")) # Validate that the user could have read the relevant message message = access_message(user_profile, messages[0])[0] # OK, this is a message that you legitimately have access # to via narrowing to the stream it is on, even though you # didn't actually receive it. So we create a historical, # read UserMessage message row for you to star.
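# The row created below is pre-marked read and flagged historical, so it never
# counts as unread, and queries that want "messages you actually received" can
# filter it out; the flags update further below then applies the star to it
# like any other row, since the msgs queryset is lazily re-evaluated.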
UserMessage.objects.create(user_profile=user_profile, message=message, flags=UserMessage.flags.historical | UserMessage.flags.read) if operation == 'add': count = msgs.update(flags=F('flags').bitor(flagattr)) elif operation == 'remove': count = msgs.update(flags=F('flags').bitand(~flagattr)) else: raise AssertionError("Invalid message flags operation") event = {'type': 'update_message_flags', 'operation': operation, 'flag': flag, 'messages': messages, 'all': False} send_event(user_profile.realm, event, [user_profile.id]) if flag == "read" and operation == "add": event_time = timezone_now() do_clear_mobile_push_notifications_for_ids([user_profile.id], messages) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'], None, event_time, increment=count) do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'], None, event_time, increment=min(1, count)) return count class MessageUpdateUserInfoResult(TypedDict): message_user_ids: Set[int] mention_user_ids: Set[int] def notify_topic_moved_streams(user_profile: UserProfile, old_stream: Stream, old_topic: str, new_stream: Stream, new_topic: Optional[str], send_notification_to_old_thread: bool, send_notification_to_new_thread: bool) -> None: # Since moving content between streams is highly disruptive, # it's worth adding a couple tombstone messages showing what # happened. sender = get_system_bot(settings.NOTIFICATION_BOT) if new_topic is None: new_topic = old_topic user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**" old_topic_link = f"#**{old_stream.name}>{old_topic}**" new_topic_link = f"#**{new_stream.name}>{new_topic}**" if send_notification_to_new_thread: with override_language(new_stream.realm.default_language): internal_send_stream_message( new_stream.realm, sender, new_stream, new_topic, _("This topic was moved here from {old_location} by {user}").format( old_location=old_topic_link, user=user_mention, ), ) if send_notification_to_old_thread: with override_language(old_stream.realm.default_language): # Send a notification to the old stream that the topic was moved. internal_send_stream_message( old_stream.realm, sender, old_stream, old_topic, _("This topic was moved by {user} to {new_location}").format( user=user_mention, new_location=new_topic_link, ), ) def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult: # We exclude UserMessage.flags.historical rows since those # users did not receive the message originally, and thus # probably are not relevant for reprocessed alert_words, # mentions and similar rendering features. This may be a # decision we change in the future. 
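# For the mask check below: int(row['flags']) & (mentioned | wildcard_mentioned)
# is nonzero exactly when at least one of the two mention bits is set, so a row
# counts as mentioned for either kind of mention.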
query = UserMessage.objects.filter( message=message_id, flags=~UserMessage.flags.historical, ).values('user_profile_id', 'flags') rows = list(query) message_user_ids = { row['user_profile_id'] for row in rows } mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned mention_user_ids = { row['user_profile_id'] for row in rows if int(row['flags']) & mask } return dict( message_user_ids=message_user_ids, mention_user_ids=mention_user_ids, ) def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None: wildcard = message.mentions_wildcard mentioned_ids = message.mentions_user_ids ids_with_alert_words = message.user_ids_with_alert_words changed_ums: Set[UserMessage] = set() def update_flag(um: UserMessage, should_set: bool, flag: int) -> None: if should_set: if not (um.flags & flag): um.flags |= flag changed_ums.add(um) else: if (um.flags & flag): um.flags &= ~flag changed_ums.add(um) for um in ums: has_alert_word = um.user_profile_id in ids_with_alert_words update_flag(um, has_alert_word, UserMessage.flags.has_alert_word) mentioned = um.user_profile_id in mentioned_ids update_flag(um, mentioned, UserMessage.flags.mentioned) update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned) for um in changed_ums: um.save(update_fields=['flags']) def update_to_dict_cache(changed_messages: List[Message], realm_id: Optional[int]=None) -> List[int]: """Updates the message as stored in the to_dict cache (for serving messages).""" items_for_remote_cache = {} message_ids = [] changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id) for msg_id, msg in changed_messages_to_dict.items(): message_ids.append(msg_id) key = to_dict_cache_key_id(msg_id) items_for_remote_cache[key] = (msg,) cache_set_many(items_for_remote_cache) return message_ids # We use transaction.atomic to support select_for_update in the attachment codepath. @transaction.atomic def do_update_embedded_data(user_profile: UserProfile, message: Message, content: Optional[str], rendered_content: Optional[str]) -> None: event: Dict[str, Any] = { 'type': 'update_message', 'sender': user_profile.email, 'message_id': message.id} changed_messages = [message] ums = UserMessage.objects.filter(message=message.id) if content is not None: update_user_message_flags(message, ums) message.content = content message.rendered_content = rendered_content message.rendered_content_version = markdown_version event["content"] = content event["rendered_content"] = rendered_content message.save(update_fields=["content", "rendered_content"]) event['message_ids'] = update_to_dict_cache(changed_messages) def user_info(um: UserMessage) -> Dict[str, Any]: return { 'id': um.user_profile_id, 'flags': um.flags_list(), } send_event(user_profile.realm, event, list(map(user_info, ums))) class DeleteMessagesEvent(TypedDict, total=False): type: str message_ids: List[int] message_type: str sender_id: int recipient_id: int topic: str stream_id: int # We use transaction.atomic to support select_for_update in the attachment codepath. @transaction.atomic def do_update_message(user_profile: UserProfile, message: Message, new_stream: Optional[Stream], topic_name: Optional[str], propagate_mode: str, send_notification_to_old_thread: bool, send_notification_to_new_thread: bool, content: Optional[str], rendered_content: Optional[str], prior_mention_user_ids: Set[int], mention_user_ids: Set[int], mention_data: Optional[MentionData]=None) -> int: """ The main function for message editing. 
A message edit event can modify: * the message's content (in which case the caller will have set both content and rendered_content), * the topic, in which case the caller will have set topic_name * or both With topic edits, propagate_mode determines whether other messages also have their topics edited. """ timestamp = timezone_now() message.last_edit_time = timestamp event: Dict[str, Any] = { 'type': 'update_message', 'user_id': user_profile.id, 'edit_timestamp': datetime_to_timestamp(timestamp), 'message_id': message.id, } edit_history_event: Dict[str, Any] = { 'user_id': user_profile.id, 'timestamp': event['edit_timestamp'], } changed_messages = [message] stream_being_edited = None if message.is_stream_message(): stream_id = message.recipient.type_id stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm) event['stream_name'] = stream_being_edited.name ums = UserMessage.objects.filter(message=message.id) if content is not None: assert rendered_content is not None # mention_data is required if there's a content edit. assert mention_data is not None # add data from group mentions to mentions_user_ids. for group_id in message.mentions_user_group_ids: members = mention_data.get_group_members(group_id) message.mentions_user_ids.update(members) update_user_message_flags(message, ums) # One could imagine checking realm.allow_edit_history here and # modifying the events based on that setting, but doing so # doesn't really make sense. We need to send the edit event # to clients regardless, and a client already had access to # the original/pre-edit content of the message anyway. That # setting must be enforced on the client side, and making a # change here simply complicates the logic for clients parsing # edit history events. event['orig_content'] = message.content event['orig_rendered_content'] = message.rendered_content edit_history_event["prev_content"] = message.content edit_history_event["prev_rendered_content"] = message.rendered_content edit_history_event["prev_rendered_content_version"] = message.rendered_content_version message.content = content message.rendered_content = rendered_content message.rendered_content_version = markdown_version event["content"] = content event["rendered_content"] = rendered_content event['prev_rendered_content_version'] = message.rendered_content_version event['is_me_message'] = Message.is_status_message(content, rendered_content) # message.has_image and message.has_link will have been # already updated by markdown rendering in the caller.
message.has_attachment = check_attachment_reference_change(message) if message.is_stream_message(): if topic_name is not None: new_topic_name = topic_name else: new_topic_name = message.topic_name() stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget( stream_id=stream_id, topic_name=new_topic_name, ) else: stream_topic = None info = get_recipient_info( recipient=message.recipient, sender_id=message.sender_id, stream_topic=stream_topic, possible_wildcard_mention=mention_data.message_has_wildcards(), ) event['push_notify_user_ids'] = list(info['push_notify_user_ids']) event['stream_push_user_ids'] = list(info['stream_push_user_ids']) event['stream_email_user_ids'] = list(info['stream_email_user_ids']) event['prior_mention_user_ids'] = list(prior_mention_user_ids) event['mention_user_ids'] = list(mention_user_ids) event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids']) if message.mentions_wildcard: event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids']) else: event['wildcard_mention_user_ids'] = [] do_update_mobile_push_notification(message, prior_mention_user_ids, info['stream_push_user_ids']) if topic_name is not None or new_stream is not None: orig_topic_name = message.topic_name() event["propagate_mode"] = propagate_mode event["stream_id"] = message.recipient.type_id if new_stream is not None: assert content is None assert message.is_stream_message() assert stream_being_edited is not None edit_history_event['prev_stream'] = stream_being_edited.id event[ORIG_TOPIC] = orig_topic_name message.recipient_id = new_stream.recipient_id event["new_stream_id"] = new_stream.id event["propagate_mode"] = propagate_mode # When messages are moved from one stream to another, some # users may lose access to those messages, including guest # users and users not subscribed to the new stream (if it is a # private stream). For those users, their experience is as # though the messages were deleted, and we should send a # delete_message event to them instead. subscribers = get_active_subscriptions_for_stream_id( stream_id).select_related("user_profile") subs_to_new_stream = list(get_active_subscriptions_for_stream_id( new_stream.id).select_related("user_profile")) new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream] # Get users who aren't subscribed to the new_stream. subs_losing_usermessages = [ sub for sub in subscribers if sub.user_profile_id not in new_stream_sub_ids ] # Users who can no longer access the message without some action # from administrators. # # TODO: Extend this list to also contain users losing access # due to the messages moving to a private stream they are not # subscribed to. subs_losing_access = [ sub for sub in subs_losing_usermessages if sub.user_profile.is_guest ] ums = ums.exclude(user_profile_id__in=[ sub.user_profile_id for sub in subs_losing_usermessages]) if topic_name is not None: topic_name = truncate_topic(topic_name) message.set_topic_name(topic_name) # These fields have legacy field names.
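# (Hence the ORIG_TOPIC/TOPIC_NAME/TOPIC_LINKS constants rather than string
# literals: they map onto the wire format's historical field naming from
# before topics were renamed, so older clients keep working.)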
event[ORIG_TOPIC] = orig_topic_name event[TOPIC_NAME] = topic_name event[TOPIC_LINKS] = topic_links(message.sender.realm_id, topic_name) edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name delete_event_notify_user_ids: List[int] = [] if propagate_mode in ["change_later", "change_all"]: assert topic_name is not None or new_stream is not None messages_list = update_messages_for_topic_edit( message=message, propagate_mode=propagate_mode, orig_topic_name=orig_topic_name, topic_name=topic_name, new_stream=new_stream, ) changed_messages += messages_list if new_stream is not None: assert stream_being_edited is not None message_ids = [msg.id for msg in changed_messages] # Delete UserMessage objects for users who will no # longer have access to these messages. Note: This could be # very expensive, since it's N guest users x M messages. UserMessage.objects.filter( user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages], message_id__in=message_ids, ).delete() delete_event: DeleteMessagesEvent = { 'type': 'delete_message', 'message_ids': message_ids, 'message_type': 'stream', 'stream_id': stream_being_edited.id, 'topic': orig_topic_name, } delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access] send_event(user_profile.realm, delete_event, delete_event_notify_user_ids) if message.edit_history is not None: edit_history = ujson.loads(message.edit_history) edit_history.insert(0, edit_history_event) else: edit_history = [edit_history_event] message.edit_history = ujson.dumps(edit_history) # This does message.save(update_fields=[...]) save_message_for_edit_use_case(message=message) realm_id: Optional[int] = None if stream_being_edited is not None: realm_id = stream_being_edited.realm_id event['message_ids'] = update_to_dict_cache(changed_messages, realm_id) def user_info(um: UserMessage) -> Dict[str, Any]: return { 'id': um.user_profile_id, 'flags': um.flags_list(), } # The following block arranges that users who are subscribed to a # stream and can see history from before they subscribed get # live-update when old messages are edited (e.g. if the user does # a topic edit themself). # # We still don't send an update event to users who are not # subscribed to this stream and don't have a UserMessage row. This # means if a non-subscriber is viewing the narrow, they won't get # real-time updates. This is a balance between sending # message-edit notifications for every public stream to every user # in the organization (too expansive, and also not what we do for # newly sent messages anyway) and having magical live-updates # where possible. users_to_be_notified = list(map(user_info, ums)) if stream_being_edited is not None: if stream_being_edited.is_history_public_to_subscribers: subscribers = get_active_subscriptions_for_stream_id(stream_id) # We exclude long-term idle users, since they by # definition have no active clients. subscribers = subscribers.exclude(user_profile__long_term_idle=True) # Remove duplicates by excluding the id of users already # in users_to_be_notified list.
This is the case where a # user both has a UserMessage row and is a current # Subscriber subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums]) if new_stream is not None: assert delete_event_notify_user_ids is not None subscribers = subscribers.exclude(user_profile_id__in=delete_event_notify_user_ids) # All users that are subscribed to the stream must be # notified when a message is edited subscriber_ids = [user.user_profile_id for user in subscribers] if new_stream is not None: # TODO: Guest users don't see the new moved topic # unless breadcrumb message for new stream is # enabled. Excluding these users from receiving this # event helps us avoid an error traceback for our # clients. We should figure out a way to inform the # guest users of this new topic if sending a 'message' # event for these messages is not an option. # # Don't send this event to guest subs who are not # subscribers of the old stream but are subscribed to # the new stream; clients will be confused. old_stream_unsubbed_guests = [ sub for sub in subs_to_new_stream if sub.user_profile.is_guest and sub.user_profile_id not in subscriber_ids ] subscribers = subscribers.exclude(user_profile_id__in=[ sub.user_profile_id for sub in old_stream_unsubbed_guests]) subscriber_ids = [user.user_profile_id for user in subscribers] users_to_be_notified += list(map(subscriber_info, subscriber_ids)) send_event(user_profile.realm, event, users_to_be_notified) if (len(changed_messages) > 0 and new_stream is not None and stream_being_edited is not None): # Notify users that the topic was moved. notify_topic_moved_streams(user_profile, stream_being_edited, orig_topic_name, new_stream, topic_name, send_notification_to_old_thread, send_notification_to_new_thread) return len(changed_messages) def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None: # messages in the delete_message event belong to the same topic # or are a single private message, as any other behaviour is not possible with # the current callers to this method. messages = list(messages) message_ids = [message.id for message in messages] if not message_ids: return event: DeleteMessagesEvent = { 'type': 'delete_message', 'message_ids': message_ids, } sample_message = messages[0] message_type = "stream" users_to_notify = [] if not sample_message.is_stream_message(): assert len(messages) == 1 message_type = "private" ums = UserMessage.objects.filter(message_id__in=message_ids) users_to_notify = [um.user_profile_id for um in ums] # TODO: We should plan to remove `sender_id` here. event['recipient_id'] = sample_message.recipient_id event['sender_id'] = sample_message.sender_id archiving_chunk_size = retention.MESSAGE_BATCH_SIZE if message_type == "stream": stream_id = sample_message.recipient.type_id event['stream_id'] = stream_id event['topic'] = sample_message.topic_name() subscribers = get_active_subscriptions_for_stream_id(stream_id) # We exclude long-term idle users, since they by definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True) subscriber_ids = [user.user_profile_id for user in subscribers] users_to_notify = list(map(subscriber_info, subscriber_ids)) archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size) event['message_type'] = message_type send_event(realm, event, users_to_notify) def do_delete_messages_by_sender(user: UserProfile) -> None: message_ids = list(Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id')) if message_ids: move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE) def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]: stat = COUNT_STATS['messages_in_stream:is_bot:day'] traffic_from = timezone_now() - datetime.timedelta(days=28) query = StreamCount.objects.filter(property=stat.property, end_time__gt=traffic_from) query = query.filter(stream_id__in=stream_ids) traffic_list = query.values('stream_id').annotate(value=Sum('value')) traffic_dict = {} for traffic in traffic_list: traffic_dict[traffic["stream_id"]] = traffic["value"] return traffic_dict def round_to_2_significant_digits(number: int) -> int: return int(round(number, 2 - len(str(number)))) STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7 def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime, recent_traffic: Dict[int, int]) -> Optional[int]: try: stream_traffic = recent_traffic[stream_id] except KeyError: stream_traffic = 0 stream_age = (timezone_now() - stream_date_created).days if stream_age >= 28: average_weekly_traffic = int(stream_traffic // 4) elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS: average_weekly_traffic = int(stream_traffic * 7 // stream_age) else: return None if average_weekly_traffic == 0 and stream_traffic > 0: average_weekly_traffic = 1 return round_to_2_significant_digits(average_weekly_traffic) SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]] def get_web_public_subs(realm: Realm) -> SubHelperT: color_idx = 0 def get_next_color() -> str: nonlocal color_idx color = STREAM_ASSIGNMENT_COLORS[color_idx] color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS) return color subscribed = [] for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False): stream_dict = stream.to_dict() # Add versions of the Subscription fields based on a simulated # new user subscription set. stream_dict['is_muted'] = False stream_dict['color'] = get_next_color() stream_dict['desktop_notifications'] = True stream_dict['audible_notifications'] = True stream_dict['push_notifications'] = True stream_dict['email_notifications'] = True stream_dict['pin_to_top'] = False stream_weekly_traffic = get_average_weekly_stream_traffic(stream.id, stream.date_created, {}) stream_dict['stream_weekly_traffic'] = stream_weekly_traffic stream_dict['email_address'] = '' subscribed.append(stream_dict) return (subscribed, [], []) # In general, it's better to avoid using .values() because it makes # the code pretty ugly, but in this case, it has significant # performance impact for loading / for users with large numbers of # subscriptions, so it's worth optimizing. 
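# With .values(), each subscription row arrives as a plain dict rather than a
# model instance, e.g. (illustrative): {'active': True, 'color': '#76ce90',
# 'is_muted': False, ..., 'recipient_id': 17}, which avoids model-instantiation
# overhead for users with many subscriptions.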
def gather_subscriptions_helper(user_profile: UserProfile, include_subscribers: bool=True) -> SubHelperT: sub_dicts = get_stream_subscriptions_for_user(user_profile).values( *Subscription.API_FIELDS, "recipient_id").order_by("recipient_id") sub_dicts = list(sub_dicts) sub_recipient_ids = [ sub['recipient_id'] for sub in sub_dicts ] stream_recipient = StreamRecipientMap() stream_recipient.populate_for_recipient_ids(sub_recipient_ids) stream_ids: Set[int] = set() for sub in sub_dicts: sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id']) stream_ids.add(sub['stream_id']) recent_traffic = get_streams_traffic(stream_ids=stream_ids) all_streams = get_active_streams(user_profile.realm).select_related( "realm").values( *Stream.API_FIELDS, # date_created is used as an input for the stream_weekly_traffic computed field. "date_created", # The realm_id and recipient_id are generally not needed in the API. "realm_id", "recipient_id", # email_token isn't public to some users with access to # the stream, so doesn't belong in API_FIELDS. "email_token") stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids] stream_hash = {} for stream in stream_dicts: stream_hash[stream["id"]] = stream all_streams_id = [stream["id"] for stream in all_streams] subscribed = [] unsubscribed = [] never_subscribed = [] # Deactivated streams aren't in stream_hash. streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts if sub["stream_id"] in stream_hash] streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts} # Add never subscribed streams to streams_subscribed_map streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams}) if include_subscribers: subscriber_map: Mapping[int, Optional[List[int]]] = bulk_get_subscriber_user_ids( all_streams, user_profile, streams_subscribed_map, stream_recipient, ) else: # If we're not including subscribers, always return None, # which the below code needs to check for anyway. subscriber_map = defaultdict(lambda: None) sub_unsub_stream_ids = set() for sub in sub_dicts: sub_unsub_stream_ids.add(sub["stream_id"]) stream = stream_hash.get(sub["stream_id"]) if not stream: # This stream has been deactivated, don't include it. continue # We first construct a dictionary based on the standard Stream # and Subscription models' API_FIELDS. stream_dict = {} for field_name in Stream.API_FIELDS: if field_name == "id": stream_dict['stream_id'] = stream["id"] continue stream_dict[field_name] = stream[field_name] # Copy Subscription.API_FIELDS except for "active", which is # used to determine where to put the field. for field_name in Subscription.API_FIELDS: stream_dict[field_name] = sub[field_name] # Backwards-compatibility for clients that haven't been # updated for the in_home_view => is_muted API migration. stream_dict['in_home_view'] = not stream_dict['is_muted'] # Backwards-compatibility for clients that haven't been # updated for the is_announcement_only -> stream_post_policy # migration. stream_dict['is_announcement_only'] = \ stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS # Add a few computed fields not directly from the data models.
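# Worked example for the traffic figure computed below (see
# get_average_weekly_stream_traffic above): a 10-day-old stream with 30
# messages in the 28-day window averages 30 * 7 // 10 = 21 messages/week; a
# 60-day-old stream with 350 messages averages 350 // 4 = 87; and a raw value
# of 1234 would be rounded to 1200 (two significant digits).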
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream["id"], stream["date_created"], recent_traffic) stream_dict['email_address'] = encode_email_address_helper( stream["name"], stream["email_token"], show_sender=True) # Construct and add subscribers data subscribers: Optional[List[int]] = subscriber_map[stream["id"]] # Important: don't show the subscribers if the stream is invite only # and this user isn't on it anymore (or a realm administrator). if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin): subscribers = None # Guest users lose access to subscribers when they are unsubscribed. if not sub["active"] and user_profile.is_guest: subscribers = None if subscribers is not None: stream_dict['subscribers'] = subscribers # is_active is represented in this structure by which list we include it in. is_active = stream_dict.pop("active") if is_active: subscribed.append(stream_dict) else: unsubscribed.append(stream_dict) all_streams_id_set = set(all_streams_id) if user_profile.can_access_public_streams(): never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids else: never_subscribed_stream_ids = set() never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams if ns_stream_dict['id'] in never_subscribed_stream_ids] for stream in never_subscribed_streams: is_public = (not stream['invite_only']) if is_public or user_profile.is_realm_admin: stream_dict = {} for field_name in Stream.API_FIELDS: if field_name == "id": stream_dict['stream_id'] = stream["id"] continue stream_dict[field_name] = stream[field_name] stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic( stream["id"], stream["date_created"], recent_traffic) # Backwards-compatibility addition of removed field. stream_dict['is_announcement_only'] = \ stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS if is_public or user_profile.is_realm_admin: subscribers = subscriber_map[stream["id"]] if subscribers is not None: stream_dict['subscribers'] = subscribers never_subscribed.append(stream_dict) return (sorted(subscribed, key=lambda x: x['name']), sorted(unsubscribed, key=lambda x: x['name']), sorted(never_subscribed, key=lambda x: x['name'])) def gather_subscriptions( user_profile: UserProfile, include_subscribers: bool=False, ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]: subscribed, unsubscribed, _ = gather_subscriptions_helper( user_profile, include_subscribers=include_subscribers) if include_subscribers: user_ids = set() for subs in [subscribed, unsubscribed]: for sub in subs: if 'subscribers' in sub: for subscriber in sub['subscribers']: user_ids.add(subscriber) email_dict = get_emails_from_user_ids(list(user_ids)) for subs in [subscribed, unsubscribed]: for sub in subs: if 'subscribers' in sub: sub['subscribers'] = sorted([ email_dict[user_id] for user_id in sub['subscribers'] ]) return (subscribed, unsubscribed) def get_active_presence_idle_user_ids(realm: Realm, sender_id: int, message_type: str, active_user_ids: Set[int], user_flags: Dict[int, List[str]]) -> List[int]: ''' Given a list of active_user_ids, we build up a subset of those users who fit these criteria: * They are likely to need notifications (either due to mentions, alert words, or being PM'ed). * They are no longer "present" according to the UserPresence table. 
''' if realm.presence_disabled: return [] is_pm = message_type == 'private' user_ids = set() for user_id in active_user_ids: flags: Iterable[str] = user_flags.get(user_id, []) mentioned = 'mentioned' in flags or 'wildcard_mentioned' in flags private_message = is_pm and user_id != sender_id alerted = 'has_alert_word' in flags if mentioned or private_message or alerted: user_ids.add(user_id) return filter_presence_idle_user_ids(user_ids) def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]: # Given a set of user IDs (the recipients of a message), accesses # the UserPresence table to determine which of these users are # currently idle and should potentially get email notifications # (and push notifications with # user_profile.enable_online_push_notifications=False). # # We exclude any presence data from ZulipMobile for the purpose of # triggering these notifications; the mobile app can more # effectively do its own client-side filtering of notification # sounds/etc. for the case that the user is actively doing a PM # conversation in the app. if not user_ids: return [] # Matches presence.js constant OFFLINE_THRESHOLD_SECS = 140 recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS) rows = UserPresence.objects.filter( user_profile_id__in=user_ids, status=UserPresence.ACTIVE, timestamp__gte=recent, ).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id') active_user_ids = {row['user_profile_id'] for row in rows} idle_user_ids = user_ids - active_user_ids return sorted(list(idle_user_ids)) def do_send_confirmation_email(invitee: PreregistrationUser, referrer: UserProfile) -> str: """ Send the confirmation/welcome e-mail to an invited user. """ activation_url = create_confirmation_link(invitee, Confirmation.INVITATION) context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.delivery_email, 'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name} from_name = f"{referrer.full_name} (via Zulip)" send_email('zerver/emails/invitation', to_emails=[invitee.email], from_name=from_name, from_address=FromAddress.tokenized_no_reply_address(), language=referrer.realm.default_language, context=context, realm=referrer.realm) return activation_url def email_not_system_bot(email: str) -> None: if is_cross_realm_bot_email(email): msg = email_reserved_for_system_bots_error(email) code = msg raise ValidationError( msg, code=code, params=dict(deactivated=False), ) class InvitationError(JsonableError): code = ErrorCode.INVITATION_FAILED data_fields = ['errors', 'sent_invitations'] def __init__(self, msg: str, errors: List[Tuple[str, str, bool]], sent_invitations: bool) -> None: self._msg: str = msg self.errors: List[Tuple[str, str, bool]] = errors self.sent_invitations: bool = sent_invitations def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int: '''An upper bound on the number of invites sent in the last `days` days''' recent_invites = RealmCount.objects.filter( realm__in=realms, property='invites_sent::day', end_time__gte=timezone_now() - datetime.timedelta(days=days), ).aggregate(Sum('value'))['value__sum'] if recent_invites is None: return 0 return recent_invites def check_invite_limit(realm: Realm, num_invitees: int) -> None: '''Discourage using invitation emails as a vector for carrying spam.''' msg = _("You do not have enough remaining invites. " "Please contact {email} to have your limit raised.
" "No invitations were sent.").format(email=settings.ZULIP_ADMINISTRATOR) if not settings.OPEN_REALM_CREATION: return recent_invites = estimate_recent_invites([realm], days=1) if num_invitees + recent_invites > realm.max_invites: raise InvitationError(msg, [], sent_invitations=False) default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS) if realm.date_created <= timezone_now() - newrealm_age: # If this isn't a "newly-created" realm, we're done. The # remaining code applies an aggregate limit across all # "new" realms, to address sudden bursts of spam realms. return if realm.max_invites > default_max: # If a user is on a realm where we've bumped up # max_invites, then we exempt them from invite limits. return new_realms = Realm.objects.filter( date_created__gte=timezone_now() - newrealm_age, _max_invites__lte=default_max, ).all() for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS: recent_invites = estimate_recent_invites(new_realms, days=days) if num_invitees + recent_invites > count: raise InvitationError(msg, [], sent_invitations=False) def do_invite_users(user_profile: UserProfile, invitee_emails: SizedTextIterable, streams: Iterable[Stream], invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> None: check_invite_limit(user_profile.realm, len(invitee_emails)) realm = user_profile.realm if not realm.invite_required: # Inhibit joining an open realm to send spam invitations. min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS) if (user_profile.date_joined > timezone_now() - min_age and not user_profile.is_realm_admin): raise InvitationError( _("Your account is too new to send invites for this organization. " "Ask an organization admin, or a more experienced user."), [], sent_invitations=False) good_emails: Set[str] = set() errors: List[Tuple[str, str, bool]] = [] validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm) for email in invitee_emails: if email == '': continue email_error = validate_email_is_valid( email, validate_email_allowed_in_realm, ) if email_error: errors.append((email, email_error, False)) else: good_emails.add(email) ''' good_emails are emails that look ok so far, but we still need to make sure they're not gonna conflict with existing users ''' error_dict = get_existing_user_errors(user_profile.realm, good_emails) skipped: List[Tuple[str, str, bool]] = [] for email in error_dict: msg, deactivated = error_dict[email] skipped.append((email, msg, deactivated)) good_emails.remove(email) validated_emails = list(good_emails) if errors: raise InvitationError( _("Some emails did not validate, so we didn't send any invitations."), errors + skipped, sent_invitations=False) if skipped and len(skipped) == len(invitee_emails): # All e-mails were skipped, so we didn't actually invite anyone. raise InvitationError(_("We weren't able to invite anyone."), skipped, sent_invitations=False) # We do this here rather than in the invite queue processor since this # is used for rate limiting invitations, rather than keeping track of # when exactly invitations were sent do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'], None, timezone_now(), increment=len(validated_emails)) # Now that we are past all the possible errors, we actually create # the PreregistrationUser objects and trigger the email invitations. for email in validated_emails: # The logged in user is the referrer. 
prereg_user = PreregistrationUser(email=email, referred_by=user_profile, invited_as=invite_as, realm=user_profile.realm) prereg_user.save() stream_ids = [stream.id for stream in streams] prereg_user.streams.set(stream_ids) event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id} queue_json_publish("invites", event) if skipped: raise InvitationError(_("Some of those addresses are already using Zulip, " "so we didn't send them an invitation. We did send " "invitations to everyone else!"), skipped, sent_invitations=True) notify_invites_changed(user_profile) def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]: if user_profile.is_realm_admin: prereg_users = filter_to_valid_prereg_users( PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm) ) else: prereg_users = filter_to_valid_prereg_users( PreregistrationUser.objects.filter(referred_by=user_profile) ) invites = [] for invitee in prereg_users: invites.append(dict(email=invitee.email, invited_by_user_id=invitee.referred_by.id, invited=datetime_to_timestamp(invitee.invited_at), id=invitee.id, invited_as=invitee.invited_as, is_multiuse=False)) if not user_profile.is_realm_admin: # We do not return multiuse invites to non-admin users. return invites lowest_datetime = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS) multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE, date_sent__gte=lowest_datetime) for confirmation_obj in multiuse_confirmation_objs: invite = confirmation_obj.content_object invites.append(dict(invited_by_user_id=invite.referred_by.id, invited=datetime_to_timestamp(confirmation_obj.date_sent), id=invite.id, link_url=confirmation_url(confirmation_obj.confirmation_key, user_profile.realm, Confirmation.MULTIUSE_INVITE), invited_as=invite.invited_as, is_multiuse=True)) return invites def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int, streams: Sequence[Stream] = []) -> str: realm = referred_by.realm invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by) if streams: invite.streams.set(streams) invite.invited_as = invited_as invite.save() notify_invites_changed(referred_by) return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE) def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None: email = prereg_user.email # Delete both the confirmation objects and the prereg_user object. # TODO: Probably we actually want to set the confirmation objects # to a "revoked" status so that we can give the invited user a better # error message. content_type = ContentType.objects.get_for_model(PreregistrationUser) Confirmation.objects.filter(content_type=content_type, object_id=prereg_user.id).delete() prereg_user.delete() clear_scheduled_invitation_emails(email) notify_invites_changed(prereg_user) def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None: content_type = ContentType.objects.get_for_model(MultiuseInvite) Confirmation.objects.filter(content_type=content_type, object_id=multiuse_invite.id).delete() multiuse_invite.delete() notify_invites_changed(multiuse_invite.referred_by) def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int: # These asserts are structurally guaranteed by the caller's code path.
assert prereg_user.referred_by is not None assert prereg_user.realm is not None check_invite_limit(prereg_user.referred_by.realm, 1) prereg_user.invited_at = timezone_now() prereg_user.save() do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'], None, prereg_user.invited_at) clear_scheduled_invitation_emails(prereg_user.email) # We don't store the custom email body, so just set it to None event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None} queue_json_publish("invites", event) return datetime_to_timestamp(prereg_user.invited_at) def notify_realm_emoji(realm: Realm) -> None: event = dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji()) send_event(realm, event, active_user_ids(realm.id)) def check_add_realm_emoji(realm: Realm, name: str, author: UserProfile, image_file: File) -> Optional[RealmEmoji]: realm_emoji = RealmEmoji(realm=realm, name=name, author=author) realm_emoji.full_clean() realm_emoji.save() emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id) # The only user-controlled portion of 'emoji_file_name' is an extension, # which can not contain '..' or '/' or '\', making it difficult to exploit emoji_file_name = mark_sanitized(emoji_file_name) emoji_uploaded_successfully = False try: upload_emoji_image(image_file, emoji_file_name, author) emoji_uploaded_successfully = True finally: if not emoji_uploaded_successfully: realm_emoji.delete() return None else: realm_emoji.file_name = emoji_file_name realm_emoji.save(update_fields=['file_name']) notify_realm_emoji(realm_emoji.realm) return realm_emoji def do_remove_realm_emoji(realm: Realm, name: str) -> None: emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False) emoji.deactivated = True emoji.save(update_fields=['deactivated']) notify_realm_emoji(realm) def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None: event = dict(type="alert_words", alert_words=words) send_event(user_profile.realm, event, [user_profile.id]) def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None: words = add_user_alert_words(user_profile, alert_words) notify_alert_words(user_profile, words) def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None: words = remove_user_alert_words(user_profile, alert_words) notify_alert_words(user_profile, words) def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str, date_muted: Optional[datetime.datetime]=None) -> None: if date_muted is None: date_muted = timezone_now() add_topic_mute(user_profile, stream.id, recipient.id, topic, date_muted) event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None: remove_topic_mute(user_profile, stream.id, topic) event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None: UserHotspot.objects.get_or_create(user=user, hotspot=hotspot) event = dict(type="hotspots", hotspots=get_next_hotspots(user)) send_event(user.realm, event, [user.id]) def notify_realm_filters(realm: Realm) -> None: realm_filters = realm_filters_for_realm(realm.id) event = dict(type="realm_filters", realm_filters=realm_filters) send_event(realm, event, active_user_ids(realm.id)) # 
NOTE: Regexes must be simple enough that they can be easily translated to JavaScript # RegExp syntax. In addition to JS-compatible syntax, the following features are available: # * Named groups will be converted to numbered groups automatically # * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int: pattern = pattern.strip() url_format_string = url_format_string.strip() realm_filter = RealmFilter( realm=realm, pattern=pattern, url_format_string=url_format_string) realm_filter.full_clean() realm_filter.save() notify_realm_filters(realm) return realm_filter.id def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None, id: Optional[int]=None) -> None: if pattern is not None: RealmFilter.objects.get(realm=realm, pattern=pattern).delete() else: RealmFilter.objects.get(realm=realm, pk=id).delete() notify_realm_filters(realm) def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]: # We may eventually use memcached to speed this up, but the DB is fast. return UserProfile.emails_from_ids(user_ids) def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> (RealmDomain): realm_domain = RealmDomain.objects.create(realm=realm, domain=domain, allow_subdomains=allow_subdomains) event = dict(type="realm_domains", op="add", realm_domain=dict(domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains)) send_event(realm, event, active_user_ids(realm.id)) return realm_domain def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None: realm_domain.allow_subdomains = allow_subdomains realm_domain.save(update_fields=['allow_subdomains']) event = dict(type="realm_domains", op="change", realm_domain=dict(domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains)) send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id)) def do_remove_realm_domain(realm_domain: RealmDomain, acting_user: Optional[UserProfile]=None) -> None: realm = realm_domain.realm domain = realm_domain.domain realm_domain.delete() if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains: # If this was the last realm domain, we mark the realm as no # longer restricted to domain, because the feature doesn't do # anything if there are no domains, and this is probably less # confusing than the alternative. 
do_set_realm_property(realm, 'emails_restricted_to_domains', False, acting_user=acting_user) event = dict(type="realm_domains", op="remove", domain=domain) send_event(realm, event, active_user_ids(realm.id)) def get_occupied_streams(realm: Realm) -> QuerySet: # TODO: Make a generic stub for QuerySet """ Get streams with subscribers """ exists_expression = Exists( Subscription.objects.filter(active=True, user_profile__is_active=True, user_profile__realm=realm, recipient_id=OuterRef('recipient_id')), ) occupied_streams = Stream.objects.filter(realm=realm, deactivated=False) \ .annotate(occupied=exists_expression).filter(occupied=True) return occupied_streams def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]: query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True) streams = Stream.get_client_data(query) return streams def do_get_streams( user_profile: UserProfile, include_public: bool=True, include_subscribed: bool=True, include_all_active: bool=False, include_default: bool=False, include_owner_subscribed: bool=False, ) -> List[Dict[str, Any]]: if include_all_active and not user_profile.is_api_super_user: raise JsonableError(_("User not authorized for this query")) include_public = include_public and user_profile.can_access_public_streams() # Start out with all streams in the realm with subscribers query = get_occupied_streams(user_profile.realm) if include_all_active: streams = Stream.get_client_data(query) else: # We construct a query as the or (|) of the various sources # this user requested streams from. query_filter: Optional[Q] = None def add_filter_option(option: Q) -> None: nonlocal query_filter if query_filter is None: query_filter = option else: query_filter |= option if include_subscribed: subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile) recipient_check = Q(id__in=set(subscribed_stream_ids)) add_filter_option(recipient_check) if include_public: invite_only_check = Q(invite_only=False) add_filter_option(invite_only_check) if include_owner_subscribed and user_profile.is_bot: bot_owner = user_profile.bot_owner assert bot_owner is not None owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner) owner_subscribed_check = Q(id__in=set(owner_stream_ids)) add_filter_option(owner_subscribed_check) if query_filter is not None: query = query.filter(query_filter) streams = Stream.get_client_data(query) else: # Don't bother going to the database with no valid sources streams = [] streams.sort(key=lambda elt: elt["name"]) if include_default: is_default = {} default_streams = get_default_streams_for_realm(user_profile.realm_id) for default_stream in default_streams: is_default[default_stream.id] = True for stream in streams: stream['is_default'] = is_default.get(stream["stream_id"], False) return streams def notify_attachment_update(user_profile: UserProfile, op: str, attachment_dict: Dict[str, Any]) -> None: event = { 'type': 'attachment', 'op': op, 'attachment': attachment_dict, "upload_space_used": user_profile.realm.currently_used_upload_space_bytes(), } send_event(user_profile.realm, event, [user_profile.id]) def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool: claimed = False for path_id in potential_path_ids: user_profile = message.sender is_message_realm_public = False if message.is_stream_message(): is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public() if not validate_attachment_request(user_profile, path_id): # Technically, there are 2 cases here: # * 
The user put something in their message that has the form # of an upload, but doesn't correspond to a file that doesn't # exist. validate_attachment_request will return None. # * The user is trying to send a link to a file they don't have permission to # access themselves. validate_attachment_request will return False. # # Either case is unusual and suggests a UI bug that got # the user in this situation, so we log in these cases. logging.warning( "User %s tried to share upload %s in message %s, but lacks permission", user_profile.id, path_id, message.id, ) continue claimed = True attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public) notify_attachment_update(user_profile, "update", attachment.to_dict()) return claimed def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None: old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago) for attachment in old_unclaimed_attachments: delete_message_image(attachment.path_id) attachment.delete() def check_attachment_reference_change(message: Message) -> bool: # For a unsaved message edit (message.* has been updated, but not # saved to the database), adjusts Attachment data to correspond to # the new content. prev_attachments = {a.path_id for a in message.attachment_set.all()} new_attachments = set(message.potential_attachment_path_ids) if new_attachments == prev_attachments: return bool(prev_attachments) to_remove = list(prev_attachments - new_attachments) if len(to_remove) > 0: attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update() message.attachment_set.remove(*attachments_to_update) to_add = list(new_attachments - prev_attachments) if len(to_add) > 0: do_claim_attachments(message, to_add) return message.attachment_set.exists() def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None: fields = custom_profile_fields_for_realm(realm.id) event = dict(type="custom_profile_fields", op=operation, fields=[f.as_dict() for f in fields]) send_event(realm, event, active_user_ids(realm.id)) def try_add_realm_default_custom_profile_field(realm: Realm, field_subtype: str) -> CustomProfileField: field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype] field = CustomProfileField(realm=realm, name=field_data['name'], field_type=CustomProfileField.EXTERNAL_ACCOUNT, hint=field_data['hint'], field_data=ujson.dumps(dict(subtype=field_subtype))) field.save() field.order = field.id field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'add') return field def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int, hint: str='', field_data: Optional[ProfileFieldData]=None) -> CustomProfileField: field = CustomProfileField(realm=realm, name=name, field_type=field_type) field.hint = hint if (field.field_type == CustomProfileField.CHOICE or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT): field.field_data = ujson.dumps(field_data or {}) field.save() field.order = field.id field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'add') return field def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None: """ Deleting a field will also delete the user profile data associated with it in CustomProfileFieldValue model. 
""" field.delete() notify_realm_custom_profile_fields(realm, 'delete') def do_remove_realm_custom_profile_fields(realm: Realm) -> None: CustomProfileField.objects.filter(realm=realm).delete() def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField, name: str, hint: str='', field_data: Optional[ProfileFieldData]=None) -> None: field.name = name field.hint = hint if (field.field_type == CustomProfileField.CHOICE or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT): field.field_data = ujson.dumps(field_data or {}) field.save() notify_realm_custom_profile_fields(realm, 'update') def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None: order_mapping = {_[1]: _[0] for _ in enumerate(order)} fields = CustomProfileField.objects.filter(realm=realm) for field in fields: if field.id not in order_mapping: raise JsonableError(_("Invalid order mapping.")) for field in fields: field.order = order_mapping[field.id] field.save(update_fields=['order']) notify_realm_custom_profile_fields(realm, 'update') def notify_user_update_custom_profile_data(user_profile: UserProfile, field: Dict[str, Union[int, str, List[int], None]]) -> None: data = dict(id=field['id']) if field['type'] == CustomProfileField.USER: data["value"] = ujson.dumps(field['value']) else: data['value'] = field['value'] if field['rendered_value']: data['rendered_value'] = field['rendered_value'] payload = dict(user_id=user_profile.id, custom_profile_field=data) event = dict(type="realm_user", op="update", person=payload) send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id)) def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile, data: List[Dict[str, Union[int, str, List[int]]]], ) -> None: with transaction.atomic(): for field in data: field_value, created = CustomProfileFieldValue.objects.get_or_create( user_profile=user_profile, field_id=field['id']) if not created and field_value.value == str(field['value']): # If the field value isn't actually being changed to a different one, # and always_notify is disabled, we have nothing to do here for this field. # Note: field_value.value is a TextField() so we need to cast field['value'] # to a string for the comparison in this if. 
continue field_value.value = field['value'] if field_value.field.is_renderable(): field_value.rendered_value = render_stream_description(str(field['value'])) field_value.save(update_fields=['value', 'rendered_value']) else: field_value.save(update_fields=['value']) notify_user_update_custom_profile_data(user_profile, { "id": field_value.field_id, "value": field_value.value, "rendered_value": field_value.rendered_value, "type": field_value.field.field_type}) def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None: try: field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id) field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile) field_value.delete() notify_user_update_custom_profile_data(user_profile, {'id': field_id, 'value': None, 'rendered_value': None, 'type': field.field_type}) except CustomProfileField.DoesNotExist: raise JsonableError(_('Field id {id} not found.').format(id=field_id)) except CustomProfileFieldValue.DoesNotExist: pass def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None: event = dict(type="user_group", op="add", group=dict(name=user_group.name, members=[member.id for member in members], description=user_group.description, id=user_group.id, ), ) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile], description: str) -> None: try: user_group = create_user_group(name, initial_members, realm, description=description) do_send_create_user_group_event(user_group, initial_members) except django.db.utils.IntegrityError: raise JsonableError(_("User group '{}' already exists.").format(name)) def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None: event = dict(type="user_group", op='update', group_id=user_group.id, data=data) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def do_update_user_group_name(user_group: UserGroup, name: str) -> None: try: user_group.name = name user_group.save(update_fields=['name']) except django.db.utils.IntegrityError: raise JsonableError(_("User group '{}' already exists.").format(name)) do_send_user_group_update_event(user_group, dict(name=name)) def do_update_user_group_description(user_group: UserGroup, description: str) -> None: user_group.description = description user_group.save(update_fields=['description']) do_send_user_group_update_event(user_group, dict(description=description)) def do_update_outgoing_webhook_service(bot_profile: UserProfile, service_interface: int, service_payload_url: str) -> None: # TODO: First service is chosen because currently one bot can only have one service. # Update this once multiple services are supported. 
service = get_bot_services(bot_profile.id)[0] service.base_url = service_payload_url service.interface = service_interface service.save() send_event(bot_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=bot_profile.id, services = [dict(base_url=service.base_url, interface=service.interface, token=service.token)], ), ), bot_owner_user_ids(bot_profile)) def do_update_bot_config_data(bot_profile: UserProfile, config_data: Dict[str, str]) -> None: for key, value in config_data.items(): set_bot_config(bot_profile, key, value) updated_config_data = get_bot_config(bot_profile) send_event(bot_profile.realm, dict(type='realm_bot', op='update', bot=dict(user_id=bot_profile.id, services = [dict(config_data=updated_config_data)], ), ), bot_owner_user_ids(bot_profile)) def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]: user_profile = get_user_profile_by_id(user_profile_id) services = get_bot_services(user_profile_id) service_dicts: List[Dict[str, Any]] = [] if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: service_dicts = [{'base_url': service.base_url, 'interface': service.interface, 'token': service.token, } for service in services] elif user_profile.bot_type == UserProfile.EMBEDDED_BOT: try: service_dicts = [{'config_data': get_bot_config(user_profile), 'service_name': services[0].name, }] # A ConfigError just means that there are no config entries for user_profile. except ConfigError: pass return service_dicts def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]], realm: Realm) -> Dict[int, List[Dict[str, Any]]]: bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts] bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list) for service in Service.objects.filter(user_profile_id__in=bot_profile_ids): bot_services_by_uid[service.user_profile_id].append(service) embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT] embedded_bot_configs = get_bot_configs(embedded_bot_ids) service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {} for bot_dict in bot_dicts: bot_profile_id = bot_dict["id"] bot_type = bot_dict["bot_type"] services = bot_services_by_uid[bot_profile_id] service_dicts: List[Dict[str, Any]] = [] if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT: service_dicts = [{'base_url': service.base_url, 'interface': service.interface, 'token': service.token, } for service in services] elif bot_type == UserProfile.EMBEDDED_BOT: if bot_profile_id in embedded_bot_configs.keys(): bot_config = embedded_bot_configs[bot_profile_id] service_dicts = [{'config_data': bot_config, 'service_name': services[0].name, }] service_dicts_by_uid[bot_profile_id] = service_dicts return service_dicts_by_uid def get_owned_bot_dicts(user_profile: UserProfile, include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]: if user_profile.is_realm_admin and include_all_realm_bots_if_admin: result = get_bot_dicts_in_realm(user_profile.realm) else: result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True, bot_owner=user_profile).values(*bot_dict_fields) services_by_ids = get_service_dicts_for_bots(result, user_profile.realm) return [{'email': botdict['email'], 'user_id': botdict['id'], 'full_name': botdict['full_name'], 'bot_type': botdict['bot_type'], 'is_active': botdict['is_active'], 'api_key': botdict['api_key'], 'default_sending_stream': botdict['default_sending_stream__name'], 'default_events_register_stream': botdict['default_events_register_stream__name'], 
'default_all_public_streams': botdict['default_all_public_streams'], 'owner_id': botdict['bot_owner__id'], 'avatar_url': avatar_url_from_dict(botdict), 'services': services_by_ids[botdict['id']], } for botdict in result] def do_send_user_group_members_update_event(event_name: str, user_group: UserGroup, user_ids: List[int]) -> None: event = dict(type="user_group", op=event_name, group_id=user_group.id, user_ids=user_ids) send_event(user_group.realm, event, active_user_ids(user_group.realm_id)) def bulk_add_members_to_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None: memberships = [UserGroupMembership(user_group_id=user_group.id, user_profile=user_profile) for user_profile in user_profiles] UserGroupMembership.objects.bulk_create(memberships) user_ids = [up.id for up in user_profiles] do_send_user_group_members_update_event('add_members', user_group, user_ids) def remove_members_from_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None: UserGroupMembership.objects.filter( user_group_id=user_group.id, user_profile__in=user_profiles).delete() user_ids = [up.id for up in user_profiles] do_send_user_group_members_update_event('remove_members', user_group, user_ids) def do_send_delete_user_group_event(realm: Realm, user_group_id: int, realm_id: int) -> None: event = dict(type="user_group", op="remove", group_id=user_group_id) send_event(realm, event, active_user_ids(realm_id)) def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None: user_group = access_user_group_by_id(user_group_id, user_profile) user_group.delete() do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id) def do_send_realm_reactivation_email(realm: Realm) -> None: url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION) context = {'confirmation_url': url, 'realm_uri': realm.uri, 'realm_name': realm.name} language = realm.default_language send_email_to_admins( 'zerver/emails/realm_reactivation', realm, from_address=FromAddress.tokenized_no_reply_address(), from_name=FromAddress.security_email_from_name(language=language), language=language, context=context) def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None: user.zoom_token = token user.save(update_fields=["zoom_token"]) send_event( user.realm, dict(type="has_zoom_token", value=token is not None), [user.id], ) def notify_realm_export(user_profile: UserProfile) -> None: # In the future, we may want to send this event to all realm admins. event = dict(type='realm_export', exports=get_realm_exports_serialized(user_profile)) send_event(user_profile.realm, event, [user_profile.id]) def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None: # Give mypy a hint so it knows `ujson.loads` # isn't being passed an `Optional[str]`. export_extra_data = export.extra_data assert export_extra_data is not None export_data = ujson.loads(export_extra_data) export_path = export_data.get('export_path') if export_path: # Allow removal even if the export failed. 
delete_export_tarball(export_path) export_data.update({'deleted_timestamp': timezone_now().timestamp()}) export.extra_data = ujson.dumps(export_data) export.save(update_fields=['extra_data']) notify_realm_export(user_profile) def get_topic_messages(user_profile: UserProfile, stream: Stream, topic_name: str) -> List[Message]: query = UserMessage.objects.filter( user_profile=user_profile, message__recipient=stream.recipient, ).order_by("id") return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
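The linkifier helpers in the chunk above (see the regex note before do_add_realm_filter) expect patterns that translate cleanly to JavaScript RegExp syntax. A minimal sketch of registering such a pattern, assuming a realm with the subdomain "zulip" and an illustrative ticket URL (both are assumptions, not taken from the code above):

from zerver.lib.actions import do_add_realm_filter
from zerver.models import get_realm

realm = get_realm("zulip")  # assumed realm subdomain

# The named group (?P<id>...) becomes a numbered group when the pattern is
# translated to JavaScript RegExp syntax for the web client.
filter_id = do_add_realm_filter(
    realm,
    pattern=r"#(?P<id>[0-9]+)",
    url_format_string="https://tickets.example.com/%(id)s",
)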
add_and_switch_to_new_task
Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches' to the new task. Parameters are similar to the constructor except for model choice, batch size and negative sampling. This method does not store the resultant model onto disk. :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of the labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param multi_label: whether this task is a multi-label prediction problem :param force_switch: if True, will overwrite existing task with same name
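A minimal usage sketch for the method this docstring describes, assuming a pre-trained TARS classifier; the task name, labels, and label_type are made up for illustration:

from flair.data import Dictionary
from flair.models import TARSClassifier

tars = TARSClassifier.load("tars-base")

sentiment_labels = Dictionary(add_unk=False)
for label in ["positive", "negative", "neutral"]:
    sentiment_labels.add_item(label)

tars.add_and_switch_to_new_task(
    task_name="toy_sentiment",          # assumed task name
    label_dictionary=sentiment_labels,
    label_type="sentiment",             # assumed label type
    multi_label=False,
)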
import logging from collections import OrderedDict from pathlib import Path from typing import List, Optional, Set, Tuple, Union import numpy as np import torch from sklearn.metrics.pairwise import cosine_similarity from sklearn.preprocessing import minmax_scale from tqdm import tqdm import flair from flair.data import Dictionary, Sentence, Span, SpanLabel from flair.datasets import DataLoader, FlairDatapointDataset from flair.embeddings import ( TokenEmbeddings, TransformerDocumentEmbeddings, TransformerWordEmbeddings, ) from flair.file_utils import cached_path from flair.models.sequence_tagger_model import SequenceTagger from flair.models.text_classification_model import TextClassifier from flair.training_utils import store_embeddings log = logging.getLogger("flair") class FewshotClassifier(flair.nn.Classifier[Sentence]): def __init__(self): self._current_task = None self._task_specific_attributes = {} self.label_nearest_map = None self.tars_model: flair.nn.Classifier[Sentence] super(FewshotClassifier, self).__init__() def forward_loss( self, data_points: Union[List[Sentence], Sentence] ) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]: if not isinstance(data_points, list): data_points = [data_points] # Transform input data into TARS format sentences = self._get_tars_formatted_sentences(data_points) loss = self.tars_model.forward_loss(sentences) return loss @property def tars_embeddings(self): raise NotImplementedError def _get_tars_formatted_sentence(self, label, sentence): raise NotImplementedError def _get_tars_formatted_sentences(self, sentences: List[Sentence]): label_text_pairs = [] all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] for sentence in sentences: label_text_pairs_for_sentence = [] if self.training and self.num_negative_labels_to_sample is not None: positive_labels = list( OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)]) ) sampled_negative_labels = self._get_nearest_labels_for(positive_labels) for label in positive_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) for label in sampled_negative_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) else: for label in all_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) label_text_pairs.extend(label_text_pairs_for_sentence) return label_text_pairs def _get_nearest_labels_for(self, labels): # if there are no labels, return a random sample as negatives if len(labels) == 0: tags = self.get_current_label_dictionary().get_items() import random sample = random.sample(tags, k=self.num_negative_labels_to_sample) return sample already_sampled_negative_labels = set() # otherwise, go through all labels for label in labels: plausible_labels = [] plausible_label_probabilities = [] for plausible_label in self.label_nearest_map[label]: if plausible_label in already_sampled_negative_labels or plausible_label in labels: continue else: plausible_labels.append(plausible_label) plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label]) # make sure the probabilities always sum up to 1 plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64") plausible_label_probabilities += 1e-08 plausible_label_probabilities /= np.sum(plausible_label_probabilities) if len(plausible_labels) > 0: num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels)) 
sampled_negative_labels = np.random.choice( plausible_labels, num_samples, replace=False, p=plausible_label_probabilities, ) already_sampled_negative_labels.update(sampled_negative_labels) return already_sampled_negative_labels def train(self, mode=True): """Populate label similarity map based on cosine similarity before running epoch If the `num_negative_labels_to_sample` is set to an integer value then before starting each epoch the model would create a similarity measure between the label names based on cosine distances between their BERT encoded embeddings. """ if mode and self.num_negative_labels_to_sample is not None: self._compute_label_similarity_for_current_epoch() super().train(mode) super().train(mode) def _compute_label_similarity_for_current_epoch(self): """ Compute the similarity between all labels for better sampling of negatives """ # get and embed all labels by making a Sentence object that contains only the label text all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] label_sentences = [Sentence(label) for label in all_labels] self.tars_embeddings.eval() # TODO: check if this is necessary self.tars_embeddings.embed(label_sentences) self.tars_embeddings.train() # get each label embedding and scale between 0 and 1 if isinstance(self.tars_embeddings, TokenEmbeddings): encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences] else: encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences] normalized_encoding = minmax_scale(encodings_np) # compute similarity matrix similarity_matrix = cosine_similarity(normalized_encoding) # the higher the similarity, the greater the chance that a label is # sampled as negative example negative_label_probabilities = {} for row_index, label in enumerate(all_labels): negative_label_probabilities[label] = {} for column_index, other_label in enumerate(all_labels): if label != other_label: negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index] self.label_nearest_map = negative_label_probabilities def get_current_label_dictionary(self): label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"] return label_dictionary def get_current_label_type(self): return self._task_specific_attributes[self._current_task]["label_type"] def is_current_task_multi_label(self): return self._task_specific_attributes[self._current_task]["multi_label"] # MASKED: add_and_switch_to_new_task function (lines 180-224) def list_existing_tasks(self) -> Set[str]: """ Lists existing tasks in the loaded TARS model on the console. """ return set(self._task_specific_attributes.keys()) def switch_to_task(self, task_name): """ Switches to a task which was previously added. """ if task_name not in self._task_specific_attributes: log.error( "Provided `%s` does not exist in the model. Consider calling " "`add_and_switch_to_new_task` first.", task_name, ) else: self._current_task = task_name def _drop_task(self, task_name): if task_name in self._task_specific_attributes: if self._current_task == task_name: log.error( "`%s` is the current task." 
" Switch to some other task before dropping this.", task_name, ) else: self._task_specific_attributes.pop(task_name) else: log.warning("No task exists with the name `%s`.", task_name) @staticmethod def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]: filtered_sentences = [sentence for sentence in sentences if sentence.tokens] if len(sentences) != len(filtered_sentences): log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.") return filtered_sentences @property def label_type(self): return self.get_current_label_type() def predict_zero_shot( self, sentences: Union[List[Sentence], Sentence], candidate_label_set: Union[List[str], Set[str], str], multi_label: bool = True, ): """ Method to make zero shot predictions from the TARS model :param sentences: input sentence objects to classify :param candidate_label_set: set of candidate labels :param multi_label: indicates whether multi-label or single class prediction. Defaults to True. """ # check if candidate_label_set is empty if candidate_label_set is None or len(candidate_label_set) == 0: log.warning("Provided candidate_label_set is empty") return # make list if only one candidate label is passed if isinstance(candidate_label_set, str): candidate_label_set = {candidate_label_set} # create label dictionary label_dictionary = Dictionary(add_unk=False) for label in candidate_label_set: label_dictionary.add_item(label) # note current task existing_current_task = self._current_task # create a temporary task self.add_and_switch_to_new_task( task_name="ZeroShot", label_dictionary=label_dictionary, label_type="-".join(label_dictionary.get_items()), multi_label=multi_label, ) try: # make zero shot predictions self.predict(sentences) finally: # switch to the pre-existing task self.switch_to_task(existing_current_task) self._drop_task("ZeroShot") return class TARSTagger(FewshotClassifier): """ TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class sequence labeler which given a <label, text> pair predicts the probability for each word to belong to one of the BIOES classes. The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. 
""" super(TARSTagger, self).__init__() if isinstance(embeddings, str): embeddings = TransformerWordEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item("entity") tars_dictionary.span_labels = True # initialize a bare-bones sequence tagger self.tars_model: SequenceTagger = SequenceTagger( hidden_size=123, embeddings=embeddings, tag_dictionary=tars_dictionary, tag_type=self.static_label_type, use_crf=False, use_rnn=False, reproject_embeddings=False, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. You need to call .add_and_switch_to_new_task() " "before training this model" ) def _get_tars_formatted_sentence(self, label, sentence): original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" ")) # make a tars sentence where all labels are O by default tars_sentence = Sentence(label_text_pair, use_tokenizer=False) for entity_label in sentence.get_labels(self.label_type): if entity_label.value == label: new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span] tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value="entity")) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "tag_type": self.get_current_label_type(), "tag_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "prefix": self.prefix, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _fetch_model(model_name) -> str: if model_name == "tars-ner": cache_dir = Path("models") model_name = cached_path( "https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt", cache_dir=cache_dir, ) return model_name @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier model = TARSTagger( task_name=state["current_task"], label_dictionary=state["tag_dictionary"], label_type=state["tag_type"], embeddings=state["tars_model"].embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], prefix=state["prefix"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @property def tars_embeddings(self): return self.tars_model.embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", most_probable_first: bool = True, ): # return """ Predict sequence tags for Named Entity 
Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. """ if label_name is None: label_name = self.get_current_label_type() # with torch.no_grad(): if not sentences: return sentences if not isinstance(sentences, list): sentences = [sentences] reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: dataloader = tqdm(dataloader) overall_loss = 0 overall_count = 0 with torch.no_grad(): for batch in dataloader: batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] all_detected = {} for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] for predicted in tars_sentence.get_labels(label_name): predicted.value = label all_detected[predicted] = predicted.score if most_probable_first: import operator already_set_indices: List[int] = [] sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1)) sorted_x.reverse() for tuple in sorted_x: # get the span and its label label = tuple[0] # label = span.get_labels("tars_temp_label")[0].value label_length = ( 0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" ")) ) # determine whether tokens in this span already have a label tag_this = True for token in label.span: corresponding_token = sentence.get_token(token.idx - label_length) if corresponding_token is None: tag_this = False continue if token.idx in already_set_indices: tag_this = False continue # only add if all tokens have no label if tag_this: already_set_indices.extend(token.idx for token in label.span) predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span] sentence.add_complex_label( label_name, label=SpanLabel(Span(predicted_span), value=label.value, score=label.score), ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count class TARSClassifier(FewshotClassifier): """ TARS model for text classification. In the backend, the model uses a BERT based binary text classifier which given a <label, text> pair predicts the probability of two classes "True", and "False". 
The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" LABEL_MATCH = "YES" LABEL_NO_MATCH = "NO" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. :param multi_label: auto-detected by default, but you can set this to True to force multi-label predictionor False to force single-label prediction :param multi_label_threshold: If multi-label you can set the threshold to make predictions :param beta: Parameter for F-beta score for evaluation and training annealing """ super(TARSClassifier, self).__init__() if isinstance(embeddings, str): embeddings = TransformerDocumentEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item(self.LABEL_NO_MATCH) tars_dictionary.add_item(self.LABEL_MATCH) # initialize a bare-bones sequence tagger self.tars_model = TextClassifier( document_embeddings=embeddings, label_dictionary=tars_dictionary, label_type=self.static_label_type, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. 
You need to call .add_and_switch_to_new_task() " "before training this model" ) self.clean_up_labels = True def _clean(self, label_value: str) -> str: if self.clean_up_labels: return label_value.replace("_", " ") else: return label_value def _get_tars_formatted_sentence(self, label, sentence): label = self._clean(label) original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())] tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "label_type": self.get_current_label_type(), "label_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier label_dictionary = state["label_dictionary"] label_type = "default_label" if not state["label_type"] else state["label_type"] model: TARSClassifier = TARSClassifier( task_name=state["current_task"], label_dictionary=label_dictionary, label_type=label_type, embeddings=state["tars_model"].document_embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @staticmethod def _fetch_model(model_name) -> str: model_map = {} hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models" model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"]) cache_dir = Path("models") if model_name in model_map: model_name = cached_path(model_map[model_name], cache_dir=cache_dir) return model_name @property def tars_embeddings(self): return self.tars_model.document_embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", label_threshold: float = 0.5, multi_label: Optional[bool] = None, ): """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. 
""" if label_name is None: label_name = self.get_current_label_type() if multi_label is None: multi_label = self.is_current_task_multi_label() # with torch.no_grad(): if not sentences: return sentences if isinstance(sentences, Sentence): sentences = [sentences] # set context if not set already previous_sentence = None for sentence in sentences: if sentence.is_context_set(): continue sentence._previous_sentence = previous_sentence sentence._next_sentence = None if previous_sentence: previous_sentence._next_sentence = sentence previous_sentence = sentence reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: progressbar = tqdm(dataloader) progressbar.set_description("Batch inference") dataloader = progressbar overall_loss = 0 overall_count = 0 batch_no = 0 with torch.no_grad(): for batch in dataloader: batch_no += 1 batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] best_label = None for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, return_probabilities_for_all_classes=True if label_threshold < 0.5 else False, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] # add all labels that according to TARS match the text and are above threshold for predicted_tars_label in tars_sentence.get_labels(label_name): if ( predicted_tars_label.value == self.LABEL_MATCH and predicted_tars_label.score > label_threshold ): # do not add labels below confidence threshold sentence.add_label(label_name, label, predicted_tars_label.score) # only use label with highest confidence if enforcing single-label predictions if not multi_label: if len(sentence.get_labels(label_name)) > 0: # get all label scores and do an argmax to get the best label label_scores = torch.tensor( [label.score for label in sentence.get_labels(label_name)], dtype=torch.float, ) best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)] # remove previously added labels and only add the best label sentence.remove_labels(label_name) sentence.add_label( typename=label_name, value=best_label.value, score=best_label.score, ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count
def add_and_switch_to_new_task( self, task_name, label_dictionary: Union[List, Set, Dictionary, str], label_type: str, multi_label: bool = True, force_switch: bool = False, ): """ Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches' to the new task. Parameters are similar to the constructor except for model choice, batch size and negative sampling. This method does not store the resultant model onto disk. :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of the labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param multi_label: whether this task is a multi-label prediction problem :param force_switch: if True, will overwrite existing task with same name """ if task_name in self._task_specific_attributes and not force_switch: log.warning("Task `%s` already exists in TARS model. Switching to it.", task_name) else: # make label dictionary if no Dictionary object is passed if isinstance(label_dictionary, Dictionary): label_dictionary = label_dictionary.get_items() if type(label_dictionary) == str: label_dictionary = [label_dictionary] # prepare dictionary of tags (without B- I- prefixes and without UNK) tag_dictionary = Dictionary(add_unk=False) for tag in label_dictionary: if tag == "<unk>" or tag == "O": continue if tag[1] == "-": tag = tag[2:] tag_dictionary.add_item(tag) else: tag_dictionary.add_item(tag) self._task_specific_attributes[task_name] = { "label_dictionary": tag_dictionary, "label_type": label_type, "multi_label": multi_label, } self.switch_to_task(task_name)
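The implementation above skips "O"/"<unk>" and strips BIO prefixes when building the task dictionary; a sketch of that behaviour with an invented NER label set (the tagger construction and labels are assumptions):

from flair.models import TARSTagger

tars = TARSTagger(embeddings="bert-base-uncased")

tars.add_and_switch_to_new_task(
    task_name="toy_ner",                # assumed task name
    label_dictionary=["O", "B-PER", "I-PER", "B-LOC", "I-LOC"],
    label_type="ner",
)

# Prefixes are removed and duplicates collapse, so only bare tags remain,
# e.g. ['PER', 'LOC']:
print(tars.get_current_label_dictionary().get_items())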
180
224
import logging from collections import OrderedDict from pathlib import Path from typing import List, Optional, Set, Tuple, Union import numpy as np import torch from sklearn.metrics.pairwise import cosine_similarity from sklearn.preprocessing import minmax_scale from tqdm import tqdm import flair from flair.data import Dictionary, Sentence, Span, SpanLabel from flair.datasets import DataLoader, FlairDatapointDataset from flair.embeddings import ( TokenEmbeddings, TransformerDocumentEmbeddings, TransformerWordEmbeddings, ) from flair.file_utils import cached_path from flair.models.sequence_tagger_model import SequenceTagger from flair.models.text_classification_model import TextClassifier from flair.training_utils import store_embeddings log = logging.getLogger("flair") class FewshotClassifier(flair.nn.Classifier[Sentence]): def __init__(self): self._current_task = None self._task_specific_attributes = {} self.label_nearest_map = None self.tars_model: flair.nn.Classifier[Sentence] super(FewshotClassifier, self).__init__() def forward_loss( self, data_points: Union[List[Sentence], Sentence] ) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]: if not isinstance(data_points, list): data_points = [data_points] # Transform input data into TARS format sentences = self._get_tars_formatted_sentences(data_points) loss = self.tars_model.forward_loss(sentences) return loss @property def tars_embeddings(self): raise NotImplementedError def _get_tars_formatted_sentence(self, label, sentence): raise NotImplementedError def _get_tars_formatted_sentences(self, sentences: List[Sentence]): label_text_pairs = [] all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] for sentence in sentences: label_text_pairs_for_sentence = [] if self.training and self.num_negative_labels_to_sample is not None: positive_labels = list( OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)]) ) sampled_negative_labels = self._get_nearest_labels_for(positive_labels) for label in positive_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) for label in sampled_negative_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) else: for label in all_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) label_text_pairs.extend(label_text_pairs_for_sentence) return label_text_pairs def _get_nearest_labels_for(self, labels): # if there are no labels, return a random sample as negatives if len(labels) == 0: tags = self.get_current_label_dictionary().get_items() import random sample = random.sample(tags, k=self.num_negative_labels_to_sample) return sample already_sampled_negative_labels = set() # otherwise, go through all labels for label in labels: plausible_labels = [] plausible_label_probabilities = [] for plausible_label in self.label_nearest_map[label]: if plausible_label in already_sampled_negative_labels or plausible_label in labels: continue else: plausible_labels.append(plausible_label) plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label]) # make sure the probabilities always sum up to 1 plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64") plausible_label_probabilities += 1e-08 plausible_label_probabilities /= np.sum(plausible_label_probabilities) if len(plausible_labels) > 0: num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels)) 
sampled_negative_labels = np.random.choice( plausible_labels, num_samples, replace=False, p=plausible_label_probabilities, ) already_sampled_negative_labels.update(sampled_negative_labels) return already_sampled_negative_labels def train(self, mode=True): """Populate label similarity map based on cosine similarity before running epoch If the `num_negative_labels_to_sample` is set to an integer value then before starting each epoch the model would create a similarity measure between the label names based on cosine distances between their BERT encoded embeddings. """ if mode and self.num_negative_labels_to_sample is not None: self._compute_label_similarity_for_current_epoch() super().train(mode) super().train(mode) def _compute_label_similarity_for_current_epoch(self): """ Compute the similarity between all labels for better sampling of negatives """ # get and embed all labels by making a Sentence object that contains only the label text all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] label_sentences = [Sentence(label) for label in all_labels] self.tars_embeddings.eval() # TODO: check if this is necessary self.tars_embeddings.embed(label_sentences) self.tars_embeddings.train() # get each label embedding and scale between 0 and 1 if isinstance(self.tars_embeddings, TokenEmbeddings): encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences] else: encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences] normalized_encoding = minmax_scale(encodings_np) # compute similarity matrix similarity_matrix = cosine_similarity(normalized_encoding) # the higher the similarity, the greater the chance that a label is # sampled as negative example negative_label_probabilities = {} for row_index, label in enumerate(all_labels): negative_label_probabilities[label] = {} for column_index, other_label in enumerate(all_labels): if label != other_label: negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index] self.label_nearest_map = negative_label_probabilities def get_current_label_dictionary(self): label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"] return label_dictionary def get_current_label_type(self): return self._task_specific_attributes[self._current_task]["label_type"] def is_current_task_multi_label(self): return self._task_specific_attributes[self._current_task]["multi_label"] def add_and_switch_to_new_task( self, task_name, label_dictionary: Union[List, Set, Dictionary, str], label_type: str, multi_label: bool = True, force_switch: bool = False, ): """ Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches' to the new task. Parameters are similar to the constructor except for model choice, batch size and negative sampling. This method does not store the resultant model onto disk. :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of the labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param multi_label: whether this task is a multi-label prediction problem :param force_switch: if True, will overwrite existing task with same name """ if task_name in self._task_specific_attributes and not force_switch: log.warning("Task `%s` already exists in TARS model. 
Switching to it.", task_name) else: # make label dictionary if no Dictionary object is passed if isinstance(label_dictionary, Dictionary): label_dictionary = label_dictionary.get_items() if type(label_dictionary) == str: label_dictionary = [label_dictionary] # prepare dictionary of tags (without B- I- prefixes and without UNK) tag_dictionary = Dictionary(add_unk=False) for tag in label_dictionary: if tag == "<unk>" or tag == "O": continue if tag[1] == "-": tag = tag[2:] tag_dictionary.add_item(tag) else: tag_dictionary.add_item(tag) self._task_specific_attributes[task_name] = { "label_dictionary": tag_dictionary, "label_type": label_type, "multi_label": multi_label, } self.switch_to_task(task_name) def list_existing_tasks(self) -> Set[str]: """ Lists existing tasks in the loaded TARS model on the console. """ return set(self._task_specific_attributes.keys()) def switch_to_task(self, task_name): """ Switches to a task which was previously added. """ if task_name not in self._task_specific_attributes: log.error( "Provided `%s` does not exist in the model. Consider calling " "`add_and_switch_to_new_task` first.", task_name, ) else: self._current_task = task_name def _drop_task(self, task_name): if task_name in self._task_specific_attributes: if self._current_task == task_name: log.error( "`%s` is the current task." " Switch to some other task before dropping this.", task_name, ) else: self._task_specific_attributes.pop(task_name) else: log.warning("No task exists with the name `%s`.", task_name) @staticmethod def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]: filtered_sentences = [sentence for sentence in sentences if sentence.tokens] if len(sentences) != len(filtered_sentences): log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.") return filtered_sentences @property def label_type(self): return self.get_current_label_type() def predict_zero_shot( self, sentences: Union[List[Sentence], Sentence], candidate_label_set: Union[List[str], Set[str], str], multi_label: bool = True, ): """ Method to make zero shot predictions from the TARS model :param sentences: input sentence objects to classify :param candidate_label_set: set of candidate labels :param multi_label: indicates whether multi-label or single class prediction. Defaults to True. """ # check if candidate_label_set is empty if candidate_label_set is None or len(candidate_label_set) == 0: log.warning("Provided candidate_label_set is empty") return # make list if only one candidate label is passed if isinstance(candidate_label_set, str): candidate_label_set = {candidate_label_set} # create label dictionary label_dictionary = Dictionary(add_unk=False) for label in candidate_label_set: label_dictionary.add_item(label) # note current task existing_current_task = self._current_task # create a temporary task self.add_and_switch_to_new_task( task_name="ZeroShot", label_dictionary=label_dictionary, label_type="-".join(label_dictionary.get_items()), multi_label=multi_label, ) try: # make zero shot predictions self.predict(sentences) finally: # switch to the pre-existing task self.switch_to_task(existing_current_task) self._drop_task("ZeroShot") return class TARSTagger(FewshotClassifier): """ TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class sequence labeler which given a <label, text> pair predicts the probability for each word to belong to one of the BIOES classes. 
The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TARSTagger :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param embeddings: name of the pre-trained transformer model, e.g. 'bert-base-uncased' :param num_negative_labels_to_sample: number of negative labels to sample for each positive label against a sentence during training. Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. :param prefix: if True, the label text is prepended to the sentence text (separated by the separator token); otherwise it is appended """ super(TARSTagger, self).__init__() if isinstance(embeddings, str): embeddings = TransformerWordEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item("entity") tars_dictionary.span_labels = True # initialize a bare-bones sequence tagger self.tars_model: SequenceTagger = SequenceTagger( hidden_size=123, embeddings=embeddings, tag_dictionary=tars_dictionary, tag_type=self.static_label_type, use_crf=False, use_rnn=False, reproject_embeddings=False, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. 
You need to call .add_and_switch_to_new_task() " "before training this model" ) def _get_tars_formatted_sentence(self, label, sentence): original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" ")) # make a tars sentence where all labels are O by default tars_sentence = Sentence(label_text_pair, use_tokenizer=False) for entity_label in sentence.get_labels(self.label_type): if entity_label.value == label: new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span] tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value="entity")) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "tag_type": self.get_current_label_type(), "tag_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "prefix": self.prefix, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _fetch_model(model_name) -> str: if model_name == "tars-ner": cache_dir = Path("models") model_name = cached_path( "https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt", cache_dir=cache_dir, ) return model_name @staticmethod def _init_model_with_state_dict(state): # init new TARS tagger model = TARSTagger( task_name=state["current_task"], label_dictionary=state["tag_dictionary"], label_type=state["tag_type"], embeddings=state["tars_model"].embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], prefix=state["prefix"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @property def tars_embeddings(self): return self.tars_model.embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", most_probable_first: bool = True, ): """ Predict sequence tags for the Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch; usually bigger is faster but consumes more memory, up to a point where it has no further effect. :param return_probabilities_for_all_classes: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 
""" if label_name is None: label_name = self.get_current_label_type() # with torch.no_grad(): if not sentences: return sentences if not isinstance(sentences, list): sentences = [sentences] reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: dataloader = tqdm(dataloader) overall_loss = 0 overall_count = 0 with torch.no_grad(): for batch in dataloader: batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] all_detected = {} for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] for predicted in tars_sentence.get_labels(label_name): predicted.value = label all_detected[predicted] = predicted.score if most_probable_first: import operator already_set_indices: List[int] = [] sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1)) sorted_x.reverse() for tuple in sorted_x: # get the span and its label label = tuple[0] # label = span.get_labels("tars_temp_label")[0].value label_length = ( 0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" ")) ) # determine whether tokens in this span already have a label tag_this = True for token in label.span: corresponding_token = sentence.get_token(token.idx - label_length) if corresponding_token is None: tag_this = False continue if token.idx in already_set_indices: tag_this = False continue # only add if all tokens have no label if tag_this: already_set_indices.extend(token.idx for token in label.span) predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span] sentence.add_complex_label( label_name, label=SpanLabel(Span(predicted_span), value=label.value, score=label.score), ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count class TARSClassifier(FewshotClassifier): """ TARS model for text classification. In the backend, the model uses a BERT based binary text classifier which given a <label, text> pair predicts the probability of two classes "True", and "False". The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" LABEL_MATCH = "YES" LABEL_NO_MATCH = "NO" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. 
Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. :param multi_label: auto-detected by default, but you can set this to True to force multi-label prediction or False to force single-label prediction :param multi_label_threshold: if multi-label, the threshold used to make predictions :param beta: Parameter for F-beta score for evaluation and training annealing """ super(TARSClassifier, self).__init__() if isinstance(embeddings, str): embeddings = TransformerDocumentEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item(self.LABEL_NO_MATCH) tars_dictionary.add_item(self.LABEL_MATCH) # initialize a bare-bones text classifier self.tars_model = TextClassifier( document_embeddings=embeddings, label_dictionary=tars_dictionary, label_type=self.static_label_type, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. You need to call .add_and_switch_to_new_task() " "before training this model" ) self.clean_up_labels = True def _clean(self, label_value: str) -> str: if self.clean_up_labels: return label_value.replace("_", " ") else: return label_value def _get_tars_formatted_sentence(self, label, sentence): label = self._clean(label) original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())] tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "label_type": self.get_current_label_type(), "label_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier label_dictionary = state["label_dictionary"] label_type = "default_label" if not state["label_type"] else state["label_type"] model: TARSClassifier = TARSClassifier( task_name=state["current_task"], label_dictionary=label_dictionary, label_type=label_type, embeddings=state["tars_model"].document_embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @staticmethod def _fetch_model(model_name) -> str: model_map = {} hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models" 
model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"]) cache_dir = Path("models") if model_name in model_map: model_name = cached_path(model_map[model_name], cache_dir=cache_dir) return model_name @property def tars_embeddings(self): return self.tars_model.document_embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", label_threshold: float = 0.5, multi_label: Optional[bool] = None, ): """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. """ if label_name is None: label_name = self.get_current_label_type() if multi_label is None: multi_label = self.is_current_task_multi_label() # with torch.no_grad(): if not sentences: return sentences if isinstance(sentences, Sentence): sentences = [sentences] # set context if not set already previous_sentence = None for sentence in sentences: if sentence.is_context_set(): continue sentence._previous_sentence = previous_sentence sentence._next_sentence = None if previous_sentence: previous_sentence._next_sentence = sentence previous_sentence = sentence reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: progressbar = tqdm(dataloader) progressbar.set_description("Batch inference") dataloader = progressbar overall_loss = 0 overall_count = 0 batch_no = 0 with torch.no_grad(): for batch in dataloader: batch_no += 1 batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] best_label = None for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, return_probabilities_for_all_classes=True if label_threshold < 0.5 else False, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] # add all labels that according to TARS match the text and are above threshold for predicted_tars_label in tars_sentence.get_labels(label_name): if ( predicted_tars_label.value == self.LABEL_MATCH and predicted_tars_label.score > label_threshold ): # do not add labels below confidence threshold sentence.add_label(label_name, label, predicted_tars_label.score) # only use label with highest confidence if enforcing single-label predictions if not 
multi_label: if len(sentence.get_labels(label_name)) > 0: # get all label scores and do an argmax to get the best label label_scores = torch.tensor( [label.score for label in sentence.get_labels(label_name)], dtype=torch.float, ) best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)] # remove previously added labels and only add the best label sentence.remove_labels(label_name) sentence.add_label( typename=label_name, value=best_label.value, score=best_label.score, ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count
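For context, the classes above support zero-shot prediction through `predict_zero_shot`, which builds a temporary task from a candidate label set, predicts, and then restores the previous task. A minimal usage sketch follows; it assumes the `tars-base` checkpoint mapped in `_fetch_model` is downloadable and that `TARSClassifier` is importable from `flair.models`:

from flair.data import Sentence
from flair.models import TARSClassifier

# load the pretrained TARS classifier ("tars-base" is resolved by _fetch_model)
tars = TARSClassifier.load("tars-base")

# the candidate labels spawn a temporary "ZeroShot" task internally
sentence = Sentence("I am so glad you liked it!")
tars.predict_zero_shot(sentence, candidate_label_set={"happy", "sad"})
print(sentence.labels)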
__init__
Initializes a TARSTagger :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param embeddings: name of the pre-trained transformer model, e.g. 'bert-base-uncased' :param num_negative_labels_to_sample: number of negative labels to sample for each positive label against a sentence during training. Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. :param prefix: if True, the label text is prepended to the sentence text (separated by the separator token); otherwise it is appended
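To make the constructor documented above concrete, here is a hedged sketch of how it might be called (the task name and label values are invented for illustration):

from flair.data import Dictionary
from flair.models import TARSTagger

# hypothetical label dictionary with two entity types
ner_labels = Dictionary(add_unk=False)
ner_labels.add_item("person")
ner_labels.add_item("location")

tagger = TARSTagger(
    task_name="example_ner",         # hypothetical task name
    label_dictionary=ner_labels,
    label_type="ner",
    embeddings="bert-base-uncased",  # default transformer backbone
    num_negative_labels_to_sample=2,
)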
import logging from collections import OrderedDict from pathlib import Path from typing import List, Optional, Set, Tuple, Union import numpy as np import torch from sklearn.metrics.pairwise import cosine_similarity from sklearn.preprocessing import minmax_scale from tqdm import tqdm import flair from flair.data import Dictionary, Sentence, Span, SpanLabel from flair.datasets import DataLoader, FlairDatapointDataset from flair.embeddings import ( TokenEmbeddings, TransformerDocumentEmbeddings, TransformerWordEmbeddings, ) from flair.file_utils import cached_path from flair.models.sequence_tagger_model import SequenceTagger from flair.models.text_classification_model import TextClassifier from flair.training_utils import store_embeddings log = logging.getLogger("flair") class FewshotClassifier(flair.nn.Classifier[Sentence]): def __init__(self): self._current_task = None self._task_specific_attributes = {} self.label_nearest_map = None self.tars_model: flair.nn.Classifier[Sentence] super(FewshotClassifier, self).__init__() def forward_loss( self, data_points: Union[List[Sentence], Sentence] ) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]: if not isinstance(data_points, list): data_points = [data_points] # Transform input data into TARS format sentences = self._get_tars_formatted_sentences(data_points) loss = self.tars_model.forward_loss(sentences) return loss @property def tars_embeddings(self): raise NotImplementedError def _get_tars_formatted_sentence(self, label, sentence): raise NotImplementedError def _get_tars_formatted_sentences(self, sentences: List[Sentence]): label_text_pairs = [] all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] for sentence in sentences: label_text_pairs_for_sentence = [] if self.training and self.num_negative_labels_to_sample is not None: positive_labels = list( OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)]) ) sampled_negative_labels = self._get_nearest_labels_for(positive_labels) for label in positive_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) for label in sampled_negative_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) else: for label in all_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) label_text_pairs.extend(label_text_pairs_for_sentence) return label_text_pairs def _get_nearest_labels_for(self, labels): # if there are no labels, return a random sample as negatives if len(labels) == 0: tags = self.get_current_label_dictionary().get_items() import random sample = random.sample(tags, k=self.num_negative_labels_to_sample) return sample already_sampled_negative_labels = set() # otherwise, go through all labels for label in labels: plausible_labels = [] plausible_label_probabilities = [] for plausible_label in self.label_nearest_map[label]: if plausible_label in already_sampled_negative_labels or plausible_label in labels: continue else: plausible_labels.append(plausible_label) plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label]) # make sure the probabilities always sum up to 1 plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64") plausible_label_probabilities += 1e-08 plausible_label_probabilities /= np.sum(plausible_label_probabilities) if len(plausible_labels) > 0: num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels)) 
sampled_negative_labels = np.random.choice( plausible_labels, num_samples, replace=False, p=plausible_label_probabilities, ) already_sampled_negative_labels.update(sampled_negative_labels) return already_sampled_negative_labels def train(self, mode=True): """Populate label similarity map based on cosine similarity before running an epoch. If `num_negative_labels_to_sample` is set to an integer value then before starting each epoch the model creates a similarity measure between the label names based on cosine similarity between their BERT-encoded embeddings. """ if mode and self.num_negative_labels_to_sample is not None: self._compute_label_similarity_for_current_epoch() super().train(mode) def _compute_label_similarity_for_current_epoch(self): """ Compute the similarity between all labels for better sampling of negatives """ # get and embed all labels by making a Sentence object that contains only the label text all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] label_sentences = [Sentence(label) for label in all_labels] self.tars_embeddings.eval() # TODO: check if this is necessary self.tars_embeddings.embed(label_sentences) self.tars_embeddings.train() # get each label embedding and scale between 0 and 1 if isinstance(self.tars_embeddings, TokenEmbeddings): encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences] else: encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences] normalized_encoding = minmax_scale(encodings_np) # compute similarity matrix similarity_matrix = cosine_similarity(normalized_encoding) # the higher the similarity, the greater the chance that a label is # sampled as a negative example negative_label_probabilities = {} for row_index, label in enumerate(all_labels): negative_label_probabilities[label] = {} for column_index, other_label in enumerate(all_labels): if label != other_label: negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index] self.label_nearest_map = negative_label_probabilities def get_current_label_dictionary(self): label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"] return label_dictionary def get_current_label_type(self): return self._task_specific_attributes[self._current_task]["label_type"] def is_current_task_multi_label(self): return self._task_specific_attributes[self._current_task]["multi_label"] def add_and_switch_to_new_task( self, task_name, label_dictionary: Union[List, Set, Dictionary, str], label_type: str, multi_label: bool = True, force_switch: bool = False, ): """ Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches' to the new task. Parameters are similar to the constructor except for model choice, batch size and negative sampling. This method does not store the resultant model onto disk. :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of the labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param multi_label: whether this task is a multi-label prediction problem :param force_switch: if True, will overwrite the existing task with the same name """ if task_name in self._task_specific_attributes and not force_switch: log.warning("Task `%s` already exists in TARS model. 
Switching to it.", task_name) else: # make label dictionary if no Dictionary object is passed if isinstance(label_dictionary, Dictionary): label_dictionary = label_dictionary.get_items() if type(label_dictionary) == str: label_dictionary = [label_dictionary] # prepare dictionary of tags (without B- I- prefixes and without UNK) tag_dictionary = Dictionary(add_unk=False) for tag in label_dictionary: if tag == "<unk>" or tag == "O": continue if tag[1] == "-": tag = tag[2:] tag_dictionary.add_item(tag) else: tag_dictionary.add_item(tag) self._task_specific_attributes[task_name] = { "label_dictionary": tag_dictionary, "label_type": label_type, "multi_label": multi_label, } self.switch_to_task(task_name) def list_existing_tasks(self) -> Set[str]: """ Lists existing tasks in the loaded TARS model on the console. """ return set(self._task_specific_attributes.keys()) def switch_to_task(self, task_name): """ Switches to a task which was previously added. """ if task_name not in self._task_specific_attributes: log.error( "Provided `%s` does not exist in the model. Consider calling " "`add_and_switch_to_new_task` first.", task_name, ) else: self._current_task = task_name def _drop_task(self, task_name): if task_name in self._task_specific_attributes: if self._current_task == task_name: log.error( "`%s` is the current task." " Switch to some other task before dropping this.", task_name, ) else: self._task_specific_attributes.pop(task_name) else: log.warning("No task exists with the name `%s`.", task_name) @staticmethod def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]: filtered_sentences = [sentence for sentence in sentences if sentence.tokens] if len(sentences) != len(filtered_sentences): log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.") return filtered_sentences @property def label_type(self): return self.get_current_label_type() def predict_zero_shot( self, sentences: Union[List[Sentence], Sentence], candidate_label_set: Union[List[str], Set[str], str], multi_label: bool = True, ): """ Method to make zero shot predictions from the TARS model :param sentences: input sentence objects to classify :param candidate_label_set: set of candidate labels :param multi_label: indicates whether multi-label or single class prediction. Defaults to True. """ # check if candidate_label_set is empty if candidate_label_set is None or len(candidate_label_set) == 0: log.warning("Provided candidate_label_set is empty") return # make list if only one candidate label is passed if isinstance(candidate_label_set, str): candidate_label_set = {candidate_label_set} # create label dictionary label_dictionary = Dictionary(add_unk=False) for label in candidate_label_set: label_dictionary.add_item(label) # note current task existing_current_task = self._current_task # create a temporary task self.add_and_switch_to_new_task( task_name="ZeroShot", label_dictionary=label_dictionary, label_type="-".join(label_dictionary.get_items()), multi_label=multi_label, ) try: # make zero shot predictions self.predict(sentences) finally: # switch to the pre-existing task self.switch_to_task(existing_current_task) self._drop_task("ZeroShot") return class TARSTagger(FewshotClassifier): """ TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class sequence labeler which given a <label, text> pair predicts the probability for each word to belong to one of the BIOES classes. 
The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" # MASKED: __init__ function (lines 326-389) def _get_tars_formatted_sentence(self, label, sentence): original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" ")) # make a tars sentence where all labels are O by default tars_sentence = Sentence(label_text_pair, use_tokenizer=False) for entity_label in sentence.get_labels(self.label_type): if entity_label.value == label: new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span] tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value="entity")) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "tag_type": self.get_current_label_type(), "tag_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "prefix": self.prefix, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _fetch_model(model_name) -> str: if model_name == "tars-ner": cache_dir = Path("models") model_name = cached_path( "https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt", cache_dir=cache_dir, ) return model_name @staticmethod def _init_model_with_state_dict(state): # init new TARS tagger model = TARSTagger( task_name=state["current_task"], label_dictionary=state["tag_dictionary"], label_type=state["tag_type"], embeddings=state["tars_model"].embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], prefix=state["prefix"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @property def tars_embeddings(self): return self.tars_model.embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", most_probable_first: bool = True, ): """ Predict sequence tags for the Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch; usually bigger is faster but consumes more memory, up to a point where it has no further effect. :param return_probabilities_for_all_classes: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 
""" if label_name is None: label_name = self.get_current_label_type() # with torch.no_grad(): if not sentences: return sentences if not isinstance(sentences, list): sentences = [sentences] reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: dataloader = tqdm(dataloader) overall_loss = 0 overall_count = 0 with torch.no_grad(): for batch in dataloader: batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] all_detected = {} for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] for predicted in tars_sentence.get_labels(label_name): predicted.value = label all_detected[predicted] = predicted.score if most_probable_first: import operator already_set_indices: List[int] = [] sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1)) sorted_x.reverse() for tuple in sorted_x: # get the span and its label label = tuple[0] # label = span.get_labels("tars_temp_label")[0].value label_length = ( 0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" ")) ) # determine whether tokens in this span already have a label tag_this = True for token in label.span: corresponding_token = sentence.get_token(token.idx - label_length) if corresponding_token is None: tag_this = False continue if token.idx in already_set_indices: tag_this = False continue # only add if all tokens have no label if tag_this: already_set_indices.extend(token.idx for token in label.span) predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span] sentence.add_complex_label( label_name, label=SpanLabel(Span(predicted_span), value=label.value, score=label.score), ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count class TARSClassifier(FewshotClassifier): """ TARS model for text classification. In the backend, the model uses a BERT based binary text classifier which given a <label, text> pair predicts the probability of two classes "True", and "False". The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" LABEL_MATCH = "YES" LABEL_NO_MATCH = "NO" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. 
Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. :param multi_label: auto-detected by default, but you can set this to True to force multi-label prediction or False to force single-label prediction :param multi_label_threshold: if multi-label, the threshold used to make predictions :param beta: Parameter for F-beta score for evaluation and training annealing """ super(TARSClassifier, self).__init__() if isinstance(embeddings, str): embeddings = TransformerDocumentEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item(self.LABEL_NO_MATCH) tars_dictionary.add_item(self.LABEL_MATCH) # initialize a bare-bones text classifier self.tars_model = TextClassifier( document_embeddings=embeddings, label_dictionary=tars_dictionary, label_type=self.static_label_type, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. You need to call .add_and_switch_to_new_task() " "before training this model" ) self.clean_up_labels = True def _clean(self, label_value: str) -> str: if self.clean_up_labels: return label_value.replace("_", " ") else: return label_value def _get_tars_formatted_sentence(self, label, sentence): label = self._clean(label) original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())] tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "label_type": self.get_current_label_type(), "label_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier label_dictionary = state["label_dictionary"] label_type = "default_label" if not state["label_type"] else state["label_type"] model: TARSClassifier = TARSClassifier( task_name=state["current_task"], label_dictionary=label_dictionary, label_type=label_type, embeddings=state["tars_model"].document_embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @staticmethod def _fetch_model(model_name) -> str: model_map = {} hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models" 
model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"]) cache_dir = Path("models") if model_name in model_map: model_name = cached_path(model_map[model_name], cache_dir=cache_dir) return model_name @property def tars_embeddings(self): return self.tars_model.document_embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", label_threshold: float = 0.5, multi_label: Optional[bool] = None, ): """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. """ if label_name is None: label_name = self.get_current_label_type() if multi_label is None: multi_label = self.is_current_task_multi_label() # with torch.no_grad(): if not sentences: return sentences if isinstance(sentences, Sentence): sentences = [sentences] # set context if not set already previous_sentence = None for sentence in sentences: if sentence.is_context_set(): continue sentence._previous_sentence = previous_sentence sentence._next_sentence = None if previous_sentence: previous_sentence._next_sentence = sentence previous_sentence = sentence reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: progressbar = tqdm(dataloader) progressbar.set_description("Batch inference") dataloader = progressbar overall_loss = 0 overall_count = 0 batch_no = 0 with torch.no_grad(): for batch in dataloader: batch_no += 1 batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] best_label = None for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, return_probabilities_for_all_classes=True if label_threshold < 0.5 else False, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] # add all labels that according to TARS match the text and are above threshold for predicted_tars_label in tars_sentence.get_labels(label_name): if ( predicted_tars_label.value == self.LABEL_MATCH and predicted_tars_label.score > label_threshold ): # do not add labels below confidence threshold sentence.add_label(label_name, label, predicted_tars_label.score) # only use label with highest confidence if enforcing single-label predictions if not 
multi_label: if len(sentence.get_labels(label_name)) > 0: # get all label scores and do an argmax to get the best label label_scores = torch.tensor( [label.score for label in sentence.get_labels(label_name)], dtype=torch.float, ) best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)] # remove previously added labels and only add the best label sentence.remove_labels(label_name) sentence.add_label( typename=label_name, value=best_label.value, score=best_label.score, ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count
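The masked constructor above is the only missing piece before the model can be trained on a new task. As a rough sketch of the surrounding workflow (assuming flair's `ModelTrainer` and the `CONLL_03` corpus loader; paths, task name, and hyperparameters are illustrative only):

import flair.datasets
from flair.models import TARSTagger
from flair.trainers import ModelTrainer

corpus = flair.datasets.CONLL_03()  # assumes the CoNLL-03 data is available locally

tagger = TARSTagger.load("tars-ner")  # checkpoint resolved by _fetch_model above
tagger.add_and_switch_to_new_task(
    task_name="conll03_ner",  # hypothetical task name
    label_dictionary=corpus.make_label_dictionary(label_type="ner"),
    label_type="ner",
)

trainer = ModelTrainer(tagger, corpus)
trainer.train(
    "resources/taggers/tars_conll03",  # hypothetical output path
    learning_rate=0.02,
    mini_batch_size=16,
    max_epochs=10,
)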
def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TARSTagger :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param embeddings: name of the pre-trained transformer model, e.g. 'bert-base-uncased' :param num_negative_labels_to_sample: number of negative labels to sample for each positive label against a sentence during training. Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. :param prefix: if True, the label text is prepended to the sentence text (separated by the separator token); otherwise it is appended """ super(TARSTagger, self).__init__() if isinstance(embeddings, str): embeddings = TransformerWordEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item("entity") tars_dictionary.span_labels = True # initialize a bare-bones sequence tagger self.tars_model: SequenceTagger = SequenceTagger( hidden_size=123, embeddings=embeddings, tag_dictionary=tars_dictionary, tag_type=self.static_label_type, use_crf=False, use_rnn=False, reproject_embeddings=False, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. You need to call .add_and_switch_to_new_task() " "before training this model" )
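Once constructed (or loaded from the `tars-ner` checkpoint handled by `_fetch_model`), the tagger also supports zero-shot NER through the inherited `predict_zero_shot`; a minimal sketch, assuming the checkpoint is reachable:

from flair.data import Sentence
from flair.models import TARSTagger

tagger = TARSTagger.load("tars-ner")

sentence = Sentence("George Washington went to Washington.")
tagger.predict_zero_shot(sentence, candidate_label_set={"person", "city"})

for label in sentence.get_labels():
    print(label)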
326
389
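To illustrate the `prefix`/`separator` mechanics used by `_get_tars_formatted_sentence` in the file below, here is a standalone sketch that reproduces the pairing scheme with plain strings ('[SEP]' stands in for the model-dependent separator token):

def format_tars_pair(label: str, text: str, separator: str = "[SEP]", prefix: bool = True) -> str:
    # label-first when prefix=True, mirroring the TARS input formatting
    return f"{label} {separator} {text}" if prefix else f"{text} {separator} {label}"

print(format_tars_pair("person", "George Washington went to Washington ."))
# person [SEP] George Washington went to Washington .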
import logging from collections import OrderedDict from pathlib import Path from typing import List, Optional, Set, Tuple, Union import numpy as np import torch from sklearn.metrics.pairwise import cosine_similarity from sklearn.preprocessing import minmax_scale from tqdm import tqdm import flair from flair.data import Dictionary, Sentence, Span, SpanLabel from flair.datasets import DataLoader, FlairDatapointDataset from flair.embeddings import ( TokenEmbeddings, TransformerDocumentEmbeddings, TransformerWordEmbeddings, ) from flair.file_utils import cached_path from flair.models.sequence_tagger_model import SequenceTagger from flair.models.text_classification_model import TextClassifier from flair.training_utils import store_embeddings log = logging.getLogger("flair") class FewshotClassifier(flair.nn.Classifier[Sentence]): def __init__(self): self._current_task = None self._task_specific_attributes = {} self.label_nearest_map = None self.tars_model: flair.nn.Classifier[Sentence] super(FewshotClassifier, self).__init__() def forward_loss( self, data_points: Union[List[Sentence], Sentence] ) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]: if not isinstance(data_points, list): data_points = [data_points] # Transform input data into TARS format sentences = self._get_tars_formatted_sentences(data_points) loss = self.tars_model.forward_loss(sentences) return loss @property def tars_embeddings(self): raise NotImplementedError def _get_tars_formatted_sentence(self, label, sentence): raise NotImplementedError def _get_tars_formatted_sentences(self, sentences: List[Sentence]): label_text_pairs = [] all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] for sentence in sentences: label_text_pairs_for_sentence = [] if self.training and self.num_negative_labels_to_sample is not None: positive_labels = list( OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)]) ) sampled_negative_labels = self._get_nearest_labels_for(positive_labels) for label in positive_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) for label in sampled_negative_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) else: for label in all_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) label_text_pairs.extend(label_text_pairs_for_sentence) return label_text_pairs def _get_nearest_labels_for(self, labels): # if there are no labels, return a random sample as negatives if len(labels) == 0: tags = self.get_current_label_dictionary().get_items() import random sample = random.sample(tags, k=self.num_negative_labels_to_sample) return sample already_sampled_negative_labels = set() # otherwise, go through all labels for label in labels: plausible_labels = [] plausible_label_probabilities = [] for plausible_label in self.label_nearest_map[label]: if plausible_label in already_sampled_negative_labels or plausible_label in labels: continue else: plausible_labels.append(plausible_label) plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label]) # make sure the probabilities always sum up to 1 plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64") plausible_label_probabilities += 1e-08 plausible_label_probabilities /= np.sum(plausible_label_probabilities) if len(plausible_labels) > 0: num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels)) 
sampled_negative_labels = np.random.choice( plausible_labels, num_samples, replace=False, p=plausible_label_probabilities, ) already_sampled_negative_labels.update(sampled_negative_labels) return already_sampled_negative_labels def train(self, mode=True): """Populate label similarity map based on cosine similarity before running an epoch. If `num_negative_labels_to_sample` is set to an integer value then before starting each epoch the model creates a similarity measure between the label names based on cosine similarity between their BERT-encoded embeddings. """ if mode and self.num_negative_labels_to_sample is not None: self._compute_label_similarity_for_current_epoch() super().train(mode) def _compute_label_similarity_for_current_epoch(self): """ Compute the similarity between all labels for better sampling of negatives """ # get and embed all labels by making a Sentence object that contains only the label text all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] label_sentences = [Sentence(label) for label in all_labels] self.tars_embeddings.eval() # TODO: check if this is necessary self.tars_embeddings.embed(label_sentences) self.tars_embeddings.train() # get each label embedding and scale between 0 and 1 if isinstance(self.tars_embeddings, TokenEmbeddings): encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences] else: encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences] normalized_encoding = minmax_scale(encodings_np) # compute similarity matrix similarity_matrix = cosine_similarity(normalized_encoding) # the higher the similarity, the greater the chance that a label is # sampled as a negative example negative_label_probabilities = {} for row_index, label in enumerate(all_labels): negative_label_probabilities[label] = {} for column_index, other_label in enumerate(all_labels): if label != other_label: negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index] self.label_nearest_map = negative_label_probabilities def get_current_label_dictionary(self): label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"] return label_dictionary def get_current_label_type(self): return self._task_specific_attributes[self._current_task]["label_type"] def is_current_task_multi_label(self): return self._task_specific_attributes[self._current_task]["multi_label"] def add_and_switch_to_new_task( self, task_name, label_dictionary: Union[List, Set, Dictionary, str], label_type: str, multi_label: bool = True, force_switch: bool = False, ): """ Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches' to the new task. Parameters are similar to the constructor except for model choice, batch size and negative sampling. This method does not store the resultant model onto disk. :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of the labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param multi_label: whether this task is a multi-label prediction problem :param force_switch: if True, will overwrite the existing task with the same name """ if task_name in self._task_specific_attributes and not force_switch: log.warning("Task `%s` already exists in TARS model. 
Switching to it.", task_name) else: # make label dictionary if no Dictionary object is passed if isinstance(label_dictionary, Dictionary): label_dictionary = label_dictionary.get_items() if type(label_dictionary) == str: label_dictionary = [label_dictionary] # prepare dictionary of tags (without B- I- prefixes and without UNK) tag_dictionary = Dictionary(add_unk=False) for tag in label_dictionary: if tag == "<unk>" or tag == "O": continue if tag[1] == "-": tag = tag[2:] tag_dictionary.add_item(tag) else: tag_dictionary.add_item(tag) self._task_specific_attributes[task_name] = { "label_dictionary": tag_dictionary, "label_type": label_type, "multi_label": multi_label, } self.switch_to_task(task_name) def list_existing_tasks(self) -> Set[str]: """ Lists existing tasks in the loaded TARS model on the console. """ return set(self._task_specific_attributes.keys()) def switch_to_task(self, task_name): """ Switches to a task which was previously added. """ if task_name not in self._task_specific_attributes: log.error( "Provided `%s` does not exist in the model. Consider calling " "`add_and_switch_to_new_task` first.", task_name, ) else: self._current_task = task_name def _drop_task(self, task_name): if task_name in self._task_specific_attributes: if self._current_task == task_name: log.error( "`%s` is the current task." " Switch to some other task before dropping this.", task_name, ) else: self._task_specific_attributes.pop(task_name) else: log.warning("No task exists with the name `%s`.", task_name) @staticmethod def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]: filtered_sentences = [sentence for sentence in sentences if sentence.tokens] if len(sentences) != len(filtered_sentences): log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.") return filtered_sentences @property def label_type(self): return self.get_current_label_type() def predict_zero_shot( self, sentences: Union[List[Sentence], Sentence], candidate_label_set: Union[List[str], Set[str], str], multi_label: bool = True, ): """ Method to make zero shot predictions from the TARS model :param sentences: input sentence objects to classify :param candidate_label_set: set of candidate labels :param multi_label: indicates whether multi-label or single class prediction. Defaults to True. """ # check if candidate_label_set is empty if candidate_label_set is None or len(candidate_label_set) == 0: log.warning("Provided candidate_label_set is empty") return # make list if only one candidate label is passed if isinstance(candidate_label_set, str): candidate_label_set = {candidate_label_set} # create label dictionary label_dictionary = Dictionary(add_unk=False) for label in candidate_label_set: label_dictionary.add_item(label) # note current task existing_current_task = self._current_task # create a temporary task self.add_and_switch_to_new_task( task_name="ZeroShot", label_dictionary=label_dictionary, label_type="-".join(label_dictionary.get_items()), multi_label=multi_label, ) try: # make zero shot predictions self.predict(sentences) finally: # switch to the pre-existing task self.switch_to_task(existing_current_task) self._drop_task("ZeroShot") return class TARSTagger(FewshotClassifier): """ TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class sequence labeler which given a <label, text> pair predicts the probability for each word to belong to one of the BIOES classes. 
The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. """ super(TARSTagger, self).__init__() if isinstance(embeddings, str): embeddings = TransformerWordEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item("entity") tars_dictionary.span_labels = True # initialize a bare-bones sequence tagger self.tars_model: SequenceTagger = SequenceTagger( hidden_size=123, embeddings=embeddings, tag_dictionary=tars_dictionary, tag_type=self.static_label_type, use_crf=False, use_rnn=False, reproject_embeddings=False, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. 
You need to call .add_and_switch_to_new_task() " "before training this model" ) def _get_tars_formatted_sentence(self, label, sentence): original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" ")) # make a tars sentence where all labels are O by default tars_sentence = Sentence(label_text_pair, use_tokenizer=False) for entity_label in sentence.get_labels(self.label_type): if entity_label.value == label: new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span] tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value="entity")) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "tag_type": self.get_current_label_type(), "tag_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "prefix": self.prefix, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _fetch_model(model_name) -> str: if model_name == "tars-ner": cache_dir = Path("models") model_name = cached_path( "https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt", cache_dir=cache_dir, ) return model_name @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier model = TARSTagger( task_name=state["current_task"], label_dictionary=state["tag_dictionary"], label_type=state["tag_type"], embeddings=state["tars_model"].embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], prefix=state["prefix"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @property def tars_embeddings(self): return self.tars_model.embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", most_probable_first: bool = True, ): # return """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. 
""" if label_name is None: label_name = self.get_current_label_type() # with torch.no_grad(): if not sentences: return sentences if not isinstance(sentences, list): sentences = [sentences] reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: dataloader = tqdm(dataloader) overall_loss = 0 overall_count = 0 with torch.no_grad(): for batch in dataloader: batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] all_detected = {} for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] for predicted in tars_sentence.get_labels(label_name): predicted.value = label all_detected[predicted] = predicted.score if most_probable_first: import operator already_set_indices: List[int] = [] sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1)) sorted_x.reverse() for tuple in sorted_x: # get the span and its label label = tuple[0] # label = span.get_labels("tars_temp_label")[0].value label_length = ( 0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" ")) ) # determine whether tokens in this span already have a label tag_this = True for token in label.span: corresponding_token = sentence.get_token(token.idx - label_length) if corresponding_token is None: tag_this = False continue if token.idx in already_set_indices: tag_this = False continue # only add if all tokens have no label if tag_this: already_set_indices.extend(token.idx for token in label.span) predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span] sentence.add_complex_label( label_name, label=SpanLabel(Span(predicted_span), value=label.value, score=label.score), ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count class TARSClassifier(FewshotClassifier): """ TARS model for text classification. In the backend, the model uses a BERT based binary text classifier which given a <label, text> pair predicts the probability of two classes "True", and "False". The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" LABEL_MATCH = "YES" LABEL_NO_MATCH = "NO" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. 
Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. :param multi_label: auto-detected by default, but you can set this to True to force multi-label predictionor False to force single-label prediction :param multi_label_threshold: If multi-label you can set the threshold to make predictions :param beta: Parameter for F-beta score for evaluation and training annealing """ super(TARSClassifier, self).__init__() if isinstance(embeddings, str): embeddings = TransformerDocumentEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item(self.LABEL_NO_MATCH) tars_dictionary.add_item(self.LABEL_MATCH) # initialize a bare-bones sequence tagger self.tars_model = TextClassifier( document_embeddings=embeddings, label_dictionary=tars_dictionary, label_type=self.static_label_type, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. You need to call .add_and_switch_to_new_task() " "before training this model" ) self.clean_up_labels = True def _clean(self, label_value: str) -> str: if self.clean_up_labels: return label_value.replace("_", " ") else: return label_value def _get_tars_formatted_sentence(self, label, sentence): label = self._clean(label) original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())] tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "label_type": self.get_current_label_type(), "label_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier label_dictionary = state["label_dictionary"] label_type = "default_label" if not state["label_type"] else state["label_type"] model: TARSClassifier = TARSClassifier( task_name=state["current_task"], label_dictionary=label_dictionary, label_type=label_type, embeddings=state["tars_model"].document_embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @staticmethod def _fetch_model(model_name) -> str: model_map = {} hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models" 
model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"]) cache_dir = Path("models") if model_name in model_map: model_name = cached_path(model_map[model_name], cache_dir=cache_dir) return model_name @property def tars_embeddings(self): return self.tars_model.document_embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", label_threshold: float = 0.5, multi_label: Optional[bool] = None, ): """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. """ if label_name is None: label_name = self.get_current_label_type() if multi_label is None: multi_label = self.is_current_task_multi_label() # with torch.no_grad(): if not sentences: return sentences if isinstance(sentences, Sentence): sentences = [sentences] # set context if not set already previous_sentence = None for sentence in sentences: if sentence.is_context_set(): continue sentence._previous_sentence = previous_sentence sentence._next_sentence = None if previous_sentence: previous_sentence._next_sentence = sentence previous_sentence = sentence reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: progressbar = tqdm(dataloader) progressbar.set_description("Batch inference") dataloader = progressbar overall_loss = 0 overall_count = 0 batch_no = 0 with torch.no_grad(): for batch in dataloader: batch_no += 1 batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] best_label = None for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, return_probabilities_for_all_classes=True if label_threshold < 0.5 else False, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] # add all labels that according to TARS match the text and are above threshold for predicted_tars_label in tars_sentence.get_labels(label_name): if ( predicted_tars_label.value == self.LABEL_MATCH and predicted_tars_label.score > label_threshold ): # do not add labels below confidence threshold sentence.add_label(label_name, label, predicted_tars_label.score) # only use label with highest confidence if enforcing single-label predictions if not 
multi_label: if len(sentence.get_labels(label_name)) > 0: # get all label scores and do an argmax to get the best label label_scores = torch.tensor( [label.score for label in sentence.get_labels(label_name)], dtype=torch.float, ) best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)] # remove previously added labels and only add the best label sentence.remove_labels(label_name) sentence.add_label( typename=label_name, value=best_label.value, score=best_label.score, ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count
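The row above ends with the complete FewshotClassifier / TARSTagger / TARSClassifier source. For orientation, a minimal usage sketch of the zero-shot path implemented by predict_zero_shot follows; the "tars-base" model name is taken from _fetch_model, while the TARSClassifier.load entry point and the flair.models import path are assumed from the standard flair API rather than shown in this row.

# Minimal zero-shot classification sketch (assumptions noted above).
from flair.data import Sentence
from flair.models import TARSClassifier

tars = TARSClassifier.load("tars-base")  # resolved via the model map in _fetch_model

sentence = Sentence("The new phone has a great camera but poor battery life.")

# predict_zero_shot registers a temporary "ZeroShot" task from the candidate
# labels, runs predict(), then switches back to the previously active task.
tars.predict_zero_shot(sentence, ["positive", "negative", "neutral"], multi_label=False)

print(sentence)  # the predicted label is attached to the Sentence object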
__init__
Initializes a TARSClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model, e.g. 'bert-base-uncased' :param num_negative_labels_to_sample: number of negative labels to sample for each positive label against a sentence during training. Defaults to 2 negative labels for each positive label. The model samples all negative labels if None is passed, which slows down training considerably. :param multi_label: auto-detected by default, but you can set this to True to force multi-label prediction or False to force single-label prediction :param multi_label_threshold: if multi-label, the threshold used to make predictions :param beta: parameter for the F-beta score used for evaluation and training annealing
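As a reading aid for the parameter list above, a hypothetical constructor call is sketched below; the task name and labels are invented for illustration and are not part of this row.

# Illustrative TARSClassifier construction using the documented parameters.
from flair.data import Dictionary
from flair.models import TARSClassifier

label_dict = Dictionary(add_unk=False)
for label in ["POSITIVE", "NEGATIVE"]:  # hypothetical sentiment labels
    label_dict.add_item(label)

classifier = TARSClassifier(
    task_name="sentiment",                # name of the task
    label_dictionary=label_dict,          # labels to predict
    label_type="sentiment",               # label type identifier
    embeddings="bert-base-uncased",       # pre-trained transformer backbone
    num_negative_labels_to_sample=2,      # negatives sampled per positive label
)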
import logging from collections import OrderedDict from pathlib import Path from typing import List, Optional, Set, Tuple, Union import numpy as np import torch from sklearn.metrics.pairwise import cosine_similarity from sklearn.preprocessing import minmax_scale from tqdm import tqdm import flair from flair.data import Dictionary, Sentence, Span, SpanLabel from flair.datasets import DataLoader, FlairDatapointDataset from flair.embeddings import ( TokenEmbeddings, TransformerDocumentEmbeddings, TransformerWordEmbeddings, ) from flair.file_utils import cached_path from flair.models.sequence_tagger_model import SequenceTagger from flair.models.text_classification_model import TextClassifier from flair.training_utils import store_embeddings log = logging.getLogger("flair") class FewshotClassifier(flair.nn.Classifier[Sentence]): def __init__(self): self._current_task = None self._task_specific_attributes = {} self.label_nearest_map = None self.tars_model: flair.nn.Classifier[Sentence] super(FewshotClassifier, self).__init__() def forward_loss( self, data_points: Union[List[Sentence], Sentence] ) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]: if not isinstance(data_points, list): data_points = [data_points] # Transform input data into TARS format sentences = self._get_tars_formatted_sentences(data_points) loss = self.tars_model.forward_loss(sentences) return loss @property def tars_embeddings(self): raise NotImplementedError def _get_tars_formatted_sentence(self, label, sentence): raise NotImplementedError def _get_tars_formatted_sentences(self, sentences: List[Sentence]): label_text_pairs = [] all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] for sentence in sentences: label_text_pairs_for_sentence = [] if self.training and self.num_negative_labels_to_sample is not None: positive_labels = list( OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)]) ) sampled_negative_labels = self._get_nearest_labels_for(positive_labels) for label in positive_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) for label in sampled_negative_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) else: for label in all_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) label_text_pairs.extend(label_text_pairs_for_sentence) return label_text_pairs def _get_nearest_labels_for(self, labels): # if there are no labels, return a random sample as negatives if len(labels) == 0: tags = self.get_current_label_dictionary().get_items() import random sample = random.sample(tags, k=self.num_negative_labels_to_sample) return sample already_sampled_negative_labels = set() # otherwise, go through all labels for label in labels: plausible_labels = [] plausible_label_probabilities = [] for plausible_label in self.label_nearest_map[label]: if plausible_label in already_sampled_negative_labels or plausible_label in labels: continue else: plausible_labels.append(plausible_label) plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label]) # make sure the probabilities always sum up to 1 plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64") plausible_label_probabilities += 1e-08 plausible_label_probabilities /= np.sum(plausible_label_probabilities) if len(plausible_labels) > 0: num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels)) 
sampled_negative_labels = np.random.choice( plausible_labels, num_samples, replace=False, p=plausible_label_probabilities, ) already_sampled_negative_labels.update(sampled_negative_labels) return already_sampled_negative_labels def train(self, mode=True): """Populate label similarity map based on cosine similarity before running epoch If the `num_negative_labels_to_sample` is set to an integer value then before starting each epoch the model would create a similarity measure between the label names based on cosine distances between their BERT encoded embeddings. """ if mode and self.num_negative_labels_to_sample is not None: self._compute_label_similarity_for_current_epoch() super().train(mode) super().train(mode) def _compute_label_similarity_for_current_epoch(self): """ Compute the similarity between all labels for better sampling of negatives """ # get and embed all labels by making a Sentence object that contains only the label text all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] label_sentences = [Sentence(label) for label in all_labels] self.tars_embeddings.eval() # TODO: check if this is necessary self.tars_embeddings.embed(label_sentences) self.tars_embeddings.train() # get each label embedding and scale between 0 and 1 if isinstance(self.tars_embeddings, TokenEmbeddings): encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences] else: encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences] normalized_encoding = minmax_scale(encodings_np) # compute similarity matrix similarity_matrix = cosine_similarity(normalized_encoding) # the higher the similarity, the greater the chance that a label is # sampled as negative example negative_label_probabilities = {} for row_index, label in enumerate(all_labels): negative_label_probabilities[label] = {} for column_index, other_label in enumerate(all_labels): if label != other_label: negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index] self.label_nearest_map = negative_label_probabilities def get_current_label_dictionary(self): label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"] return label_dictionary def get_current_label_type(self): return self._task_specific_attributes[self._current_task]["label_type"] def is_current_task_multi_label(self): return self._task_specific_attributes[self._current_task]["multi_label"] def add_and_switch_to_new_task( self, task_name, label_dictionary: Union[List, Set, Dictionary, str], label_type: str, multi_label: bool = True, force_switch: bool = False, ): """ Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches' to the new task. Parameters are similar to the constructor except for model choice, batch size and negative sampling. This method does not store the resultant model onto disk. :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of the labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param multi_label: whether this task is a multi-label prediction problem :param force_switch: if True, will overwrite existing task with same name """ if task_name in self._task_specific_attributes and not force_switch: log.warning("Task `%s` already exists in TARS model. 
Switching to it.", task_name) else: # make label dictionary if no Dictionary object is passed if isinstance(label_dictionary, Dictionary): label_dictionary = label_dictionary.get_items() if type(label_dictionary) == str: label_dictionary = [label_dictionary] # prepare dictionary of tags (without B- I- prefixes and without UNK) tag_dictionary = Dictionary(add_unk=False) for tag in label_dictionary: if tag == "<unk>" or tag == "O": continue if tag[1] == "-": tag = tag[2:] tag_dictionary.add_item(tag) else: tag_dictionary.add_item(tag) self._task_specific_attributes[task_name] = { "label_dictionary": tag_dictionary, "label_type": label_type, "multi_label": multi_label, } self.switch_to_task(task_name) def list_existing_tasks(self) -> Set[str]: """ Lists existing tasks in the loaded TARS model on the console. """ return set(self._task_specific_attributes.keys()) def switch_to_task(self, task_name): """ Switches to a task which was previously added. """ if task_name not in self._task_specific_attributes: log.error( "Provided `%s` does not exist in the model. Consider calling " "`add_and_switch_to_new_task` first.", task_name, ) else: self._current_task = task_name def _drop_task(self, task_name): if task_name in self._task_specific_attributes: if self._current_task == task_name: log.error( "`%s` is the current task." " Switch to some other task before dropping this.", task_name, ) else: self._task_specific_attributes.pop(task_name) else: log.warning("No task exists with the name `%s`.", task_name) @staticmethod def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]: filtered_sentences = [sentence for sentence in sentences if sentence.tokens] if len(sentences) != len(filtered_sentences): log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.") return filtered_sentences @property def label_type(self): return self.get_current_label_type() def predict_zero_shot( self, sentences: Union[List[Sentence], Sentence], candidate_label_set: Union[List[str], Set[str], str], multi_label: bool = True, ): """ Method to make zero shot predictions from the TARS model :param sentences: input sentence objects to classify :param candidate_label_set: set of candidate labels :param multi_label: indicates whether multi-label or single class prediction. Defaults to True. """ # check if candidate_label_set is empty if candidate_label_set is None or len(candidate_label_set) == 0: log.warning("Provided candidate_label_set is empty") return # make list if only one candidate label is passed if isinstance(candidate_label_set, str): candidate_label_set = {candidate_label_set} # create label dictionary label_dictionary = Dictionary(add_unk=False) for label in candidate_label_set: label_dictionary.add_item(label) # note current task existing_current_task = self._current_task # create a temporary task self.add_and_switch_to_new_task( task_name="ZeroShot", label_dictionary=label_dictionary, label_type="-".join(label_dictionary.get_items()), multi_label=multi_label, ) try: # make zero shot predictions self.predict(sentences) finally: # switch to the pre-existing task self.switch_to_task(existing_current_task) self._drop_task("ZeroShot") return class TARSTagger(FewshotClassifier): """ TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class sequence labeler which given a <label, text> pair predicts the probability for each word to belong to one of the BIOES classes. 
The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. """ super(TARSTagger, self).__init__() if isinstance(embeddings, str): embeddings = TransformerWordEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item("entity") tars_dictionary.span_labels = True # initialize a bare-bones sequence tagger self.tars_model: SequenceTagger = SequenceTagger( hidden_size=123, embeddings=embeddings, tag_dictionary=tars_dictionary, tag_type=self.static_label_type, use_crf=False, use_rnn=False, reproject_embeddings=False, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. 
You need to call .add_and_switch_to_new_task() " "before training this model" ) def _get_tars_formatted_sentence(self, label, sentence): original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" ")) # make a tars sentence where all labels are O by default tars_sentence = Sentence(label_text_pair, use_tokenizer=False) for entity_label in sentence.get_labels(self.label_type): if entity_label.value == label: new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span] tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value="entity")) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "tag_type": self.get_current_label_type(), "tag_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "prefix": self.prefix, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _fetch_model(model_name) -> str: if model_name == "tars-ner": cache_dir = Path("models") model_name = cached_path( "https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt", cache_dir=cache_dir, ) return model_name @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier model = TARSTagger( task_name=state["current_task"], label_dictionary=state["tag_dictionary"], label_type=state["tag_type"], embeddings=state["tars_model"].embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], prefix=state["prefix"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @property def tars_embeddings(self): return self.tars_model.embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", most_probable_first: bool = True, ): # return """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. 
""" if label_name is None: label_name = self.get_current_label_type() # with torch.no_grad(): if not sentences: return sentences if not isinstance(sentences, list): sentences = [sentences] reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: dataloader = tqdm(dataloader) overall_loss = 0 overall_count = 0 with torch.no_grad(): for batch in dataloader: batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] all_detected = {} for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] for predicted in tars_sentence.get_labels(label_name): predicted.value = label all_detected[predicted] = predicted.score if most_probable_first: import operator already_set_indices: List[int] = [] sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1)) sorted_x.reverse() for tuple in sorted_x: # get the span and its label label = tuple[0] # label = span.get_labels("tars_temp_label")[0].value label_length = ( 0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" ")) ) # determine whether tokens in this span already have a label tag_this = True for token in label.span: corresponding_token = sentence.get_token(token.idx - label_length) if corresponding_token is None: tag_this = False continue if token.idx in already_set_indices: tag_this = False continue # only add if all tokens have no label if tag_this: already_set_indices.extend(token.idx for token in label.span) predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span] sentence.add_complex_label( label_name, label=SpanLabel(Span(predicted_span), value=label.value, score=label.score), ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count class TARSClassifier(FewshotClassifier): """ TARS model for text classification. In the backend, the model uses a BERT based binary text classifier which given a <label, text> pair predicts the probability of two classes "True", and "False". The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. 
""" static_label_type = "tars_label" LABEL_MATCH = "YES" LABEL_NO_MATCH = "NO" # MASKED: __init__ function (lines 595-660) def _clean(self, label_value: str) -> str: if self.clean_up_labels: return label_value.replace("_", " ") else: return label_value def _get_tars_formatted_sentence(self, label, sentence): label = self._clean(label) original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())] tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "label_type": self.get_current_label_type(), "label_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier label_dictionary = state["label_dictionary"] label_type = "default_label" if not state["label_type"] else state["label_type"] model: TARSClassifier = TARSClassifier( task_name=state["current_task"], label_dictionary=label_dictionary, label_type=label_type, embeddings=state["tars_model"].document_embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @staticmethod def _fetch_model(model_name) -> str: model_map = {} hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models" model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"]) cache_dir = Path("models") if model_name in model_map: model_name = cached_path(model_map[model_name], cache_dir=cache_dir) return model_name @property def tars_embeddings(self): return self.tars_model.document_embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", label_threshold: float = 0.5, multi_label: Optional[bool] = None, ): """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. 
""" if label_name is None: label_name = self.get_current_label_type() if multi_label is None: multi_label = self.is_current_task_multi_label() # with torch.no_grad(): if not sentences: return sentences if isinstance(sentences, Sentence): sentences = [sentences] # set context if not set already previous_sentence = None for sentence in sentences: if sentence.is_context_set(): continue sentence._previous_sentence = previous_sentence sentence._next_sentence = None if previous_sentence: previous_sentence._next_sentence = sentence previous_sentence = sentence reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: progressbar = tqdm(dataloader) progressbar.set_description("Batch inference") dataloader = progressbar overall_loss = 0 overall_count = 0 batch_no = 0 with torch.no_grad(): for batch in dataloader: batch_no += 1 batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] best_label = None for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, return_probabilities_for_all_classes=True if label_threshold < 0.5 else False, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] # add all labels that according to TARS match the text and are above threshold for predicted_tars_label in tars_sentence.get_labels(label_name): if ( predicted_tars_label.value == self.LABEL_MATCH and predicted_tars_label.score > label_threshold ): # do not add labels below confidence threshold sentence.add_label(label_name, label, predicted_tars_label.score) # only use label with highest confidence if enforcing single-label predictions if not multi_label: if len(sentence.get_labels(label_name)) > 0: # get all label scores and do an argmax to get the best label label_scores = torch.tensor( [label.score for label in sentence.get_labels(label_name)], dtype=torch.float, ) best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)] # remove previously added labels and only add the best label sentence.remove_labels(label_name) sentence.add_label( typename=label_name, value=best_label.value, score=best_label.score, ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count
def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. :param multi_label: auto-detected by default, but you can set this to True to force multi-label predictionor False to force single-label prediction :param multi_label_threshold: If multi-label you can set the threshold to make predictions :param beta: Parameter for F-beta score for evaluation and training annealing """ super(TARSClassifier, self).__init__() if isinstance(embeddings, str): embeddings = TransformerDocumentEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item(self.LABEL_NO_MATCH) tars_dictionary.add_item(self.LABEL_MATCH) # initialize a bare-bones sequence tagger self.tars_model = TextClassifier( document_embeddings=embeddings, label_dictionary=tars_dictionary, label_type=self.static_label_type, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. You need to call .add_and_switch_to_new_task() " "before training this model" ) self.clean_up_labels = True
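The implementation above defers task registration when no task is passed to the constructor. A short sketch of that deferred path follows; the task and label names are hypothetical.

# Deferred task registration, as handled by the else-branch of __init__ above.
from flair.models import TARSClassifier

tars = TARSClassifier(embeddings="bert-base-uncased")  # logs "TARS initialized without a task ..."

tars.add_and_switch_to_new_task(
    task_name="topic",
    label_dictionary=["sports", "politics", "business"],  # a plain list is accepted
    label_type="topic",
    multi_label=False,
)

print(tars.list_existing_tasks())  # -> {'topic'}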
595
660
import logging from collections import OrderedDict from pathlib import Path from typing import List, Optional, Set, Tuple, Union import numpy as np import torch from sklearn.metrics.pairwise import cosine_similarity from sklearn.preprocessing import minmax_scale from tqdm import tqdm import flair from flair.data import Dictionary, Sentence, Span, SpanLabel from flair.datasets import DataLoader, FlairDatapointDataset from flair.embeddings import ( TokenEmbeddings, TransformerDocumentEmbeddings, TransformerWordEmbeddings, ) from flair.file_utils import cached_path from flair.models.sequence_tagger_model import SequenceTagger from flair.models.text_classification_model import TextClassifier from flair.training_utils import store_embeddings log = logging.getLogger("flair") class FewshotClassifier(flair.nn.Classifier[Sentence]): def __init__(self): self._current_task = None self._task_specific_attributes = {} self.label_nearest_map = None self.tars_model: flair.nn.Classifier[Sentence] super(FewshotClassifier, self).__init__() def forward_loss( self, data_points: Union[List[Sentence], Sentence] ) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]: if not isinstance(data_points, list): data_points = [data_points] # Transform input data into TARS format sentences = self._get_tars_formatted_sentences(data_points) loss = self.tars_model.forward_loss(sentences) return loss @property def tars_embeddings(self): raise NotImplementedError def _get_tars_formatted_sentence(self, label, sentence): raise NotImplementedError def _get_tars_formatted_sentences(self, sentences: List[Sentence]): label_text_pairs = [] all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] for sentence in sentences: label_text_pairs_for_sentence = [] if self.training and self.num_negative_labels_to_sample is not None: positive_labels = list( OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)]) ) sampled_negative_labels = self._get_nearest_labels_for(positive_labels) for label in positive_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) for label in sampled_negative_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) else: for label in all_labels: label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence)) label_text_pairs.extend(label_text_pairs_for_sentence) return label_text_pairs def _get_nearest_labels_for(self, labels): # if there are no labels, return a random sample as negatives if len(labels) == 0: tags = self.get_current_label_dictionary().get_items() import random sample = random.sample(tags, k=self.num_negative_labels_to_sample) return sample already_sampled_negative_labels = set() # otherwise, go through all labels for label in labels: plausible_labels = [] plausible_label_probabilities = [] for plausible_label in self.label_nearest_map[label]: if plausible_label in already_sampled_negative_labels or plausible_label in labels: continue else: plausible_labels.append(plausible_label) plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label]) # make sure the probabilities always sum up to 1 plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64") plausible_label_probabilities += 1e-08 plausible_label_probabilities /= np.sum(plausible_label_probabilities) if len(plausible_labels) > 0: num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels)) 
sampled_negative_labels = np.random.choice( plausible_labels, num_samples, replace=False, p=plausible_label_probabilities, ) already_sampled_negative_labels.update(sampled_negative_labels) return already_sampled_negative_labels def train(self, mode=True): """Populate label similarity map based on cosine similarity before running epoch If the `num_negative_labels_to_sample` is set to an integer value then before starting each epoch the model would create a similarity measure between the label names based on cosine distances between their BERT encoded embeddings. """ if mode and self.num_negative_labels_to_sample is not None: self._compute_label_similarity_for_current_epoch() super().train(mode) super().train(mode) def _compute_label_similarity_for_current_epoch(self): """ Compute the similarity between all labels for better sampling of negatives """ # get and embed all labels by making a Sentence object that contains only the label text all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] label_sentences = [Sentence(label) for label in all_labels] self.tars_embeddings.eval() # TODO: check if this is necessary self.tars_embeddings.embed(label_sentences) self.tars_embeddings.train() # get each label embedding and scale between 0 and 1 if isinstance(self.tars_embeddings, TokenEmbeddings): encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences] else: encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences] normalized_encoding = minmax_scale(encodings_np) # compute similarity matrix similarity_matrix = cosine_similarity(normalized_encoding) # the higher the similarity, the greater the chance that a label is # sampled as negative example negative_label_probabilities = {} for row_index, label in enumerate(all_labels): negative_label_probabilities[label] = {} for column_index, other_label in enumerate(all_labels): if label != other_label: negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index] self.label_nearest_map = negative_label_probabilities def get_current_label_dictionary(self): label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"] return label_dictionary def get_current_label_type(self): return self._task_specific_attributes[self._current_task]["label_type"] def is_current_task_multi_label(self): return self._task_specific_attributes[self._current_task]["multi_label"] def add_and_switch_to_new_task( self, task_name, label_dictionary: Union[List, Set, Dictionary, str], label_type: str, multi_label: bool = True, force_switch: bool = False, ): """ Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches' to the new task. Parameters are similar to the constructor except for model choice, batch size and negative sampling. This method does not store the resultant model onto disk. :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of the labels you want to predict :param label_type: string to identify the label type ('ner', 'sentiment', etc.) :param multi_label: whether this task is a multi-label prediction problem :param force_switch: if True, will overwrite existing task with same name """ if task_name in self._task_specific_attributes and not force_switch: log.warning("Task `%s` already exists in TARS model. 
Switching to it.", task_name) else: # make label dictionary if no Dictionary object is passed if isinstance(label_dictionary, Dictionary): label_dictionary = label_dictionary.get_items() if type(label_dictionary) == str: label_dictionary = [label_dictionary] # prepare dictionary of tags (without B- I- prefixes and without UNK) tag_dictionary = Dictionary(add_unk=False) for tag in label_dictionary: if tag == "<unk>" or tag == "O": continue if tag[1] == "-": tag = tag[2:] tag_dictionary.add_item(tag) else: tag_dictionary.add_item(tag) self._task_specific_attributes[task_name] = { "label_dictionary": tag_dictionary, "label_type": label_type, "multi_label": multi_label, } self.switch_to_task(task_name) def list_existing_tasks(self) -> Set[str]: """ Lists existing tasks in the loaded TARS model on the console. """ return set(self._task_specific_attributes.keys()) def switch_to_task(self, task_name): """ Switches to a task which was previously added. """ if task_name not in self._task_specific_attributes: log.error( "Provided `%s` does not exist in the model. Consider calling " "`add_and_switch_to_new_task` first.", task_name, ) else: self._current_task = task_name def _drop_task(self, task_name): if task_name in self._task_specific_attributes: if self._current_task == task_name: log.error( "`%s` is the current task." " Switch to some other task before dropping this.", task_name, ) else: self._task_specific_attributes.pop(task_name) else: log.warning("No task exists with the name `%s`.", task_name) @staticmethod def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]: filtered_sentences = [sentence for sentence in sentences if sentence.tokens] if len(sentences) != len(filtered_sentences): log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.") return filtered_sentences @property def label_type(self): return self.get_current_label_type() def predict_zero_shot( self, sentences: Union[List[Sentence], Sentence], candidate_label_set: Union[List[str], Set[str], str], multi_label: bool = True, ): """ Method to make zero shot predictions from the TARS model :param sentences: input sentence objects to classify :param candidate_label_set: set of candidate labels :param multi_label: indicates whether multi-label or single class prediction. Defaults to True. """ # check if candidate_label_set is empty if candidate_label_set is None or len(candidate_label_set) == 0: log.warning("Provided candidate_label_set is empty") return # make list if only one candidate label is passed if isinstance(candidate_label_set, str): candidate_label_set = {candidate_label_set} # create label dictionary label_dictionary = Dictionary(add_unk=False) for label in candidate_label_set: label_dictionary.add_item(label) # note current task existing_current_task = self._current_task # create a temporary task self.add_and_switch_to_new_task( task_name="ZeroShot", label_dictionary=label_dictionary, label_type="-".join(label_dictionary.get_items()), multi_label=multi_label, ) try: # make zero shot predictions self.predict(sentences) finally: # switch to the pre-existing task self.switch_to_task(existing_current_task) self._drop_task("ZeroShot") return class TARSTagger(FewshotClassifier): """ TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class sequence labeler which given a <label, text> pair predicts the probability for each word to belong to one of the BIOES classes. 
The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. """ super(TARSTagger, self).__init__() if isinstance(embeddings, str): embeddings = TransformerWordEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item("entity") tars_dictionary.span_labels = True # initialize a bare-bones sequence tagger self.tars_model: SequenceTagger = SequenceTagger( hidden_size=123, embeddings=embeddings, tag_dictionary=tars_dictionary, tag_type=self.static_label_type, use_crf=False, use_rnn=False, reproject_embeddings=False, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. 
You need to call .add_and_switch_to_new_task() " "before training this model" ) def _get_tars_formatted_sentence(self, label, sentence): original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" ")) # make a tars sentence where all labels are O by default tars_sentence = Sentence(label_text_pair, use_tokenizer=False) for entity_label in sentence.get_labels(self.label_type): if entity_label.value == label: new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span] tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value="entity")) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "tag_type": self.get_current_label_type(), "tag_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "prefix": self.prefix, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _fetch_model(model_name) -> str: if model_name == "tars-ner": cache_dir = Path("models") model_name = cached_path( "https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt", cache_dir=cache_dir, ) return model_name @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier model = TARSTagger( task_name=state["current_task"], label_dictionary=state["tag_dictionary"], label_type=state["tag_type"], embeddings=state["tars_model"].embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], prefix=state["prefix"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @property def tars_embeddings(self): return self.tars_model.embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", most_probable_first: bool = True, ): # return """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. 
""" if label_name is None: label_name = self.get_current_label_type() # with torch.no_grad(): if not sentences: return sentences if not isinstance(sentences, list): sentences = [sentences] reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: dataloader = tqdm(dataloader) overall_loss = 0 overall_count = 0 with torch.no_grad(): for batch in dataloader: batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] all_detected = {} for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] for predicted in tars_sentence.get_labels(label_name): predicted.value = label all_detected[predicted] = predicted.score if most_probable_first: import operator already_set_indices: List[int] = [] sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1)) sorted_x.reverse() for tuple in sorted_x: # get the span and its label label = tuple[0] # label = span.get_labels("tars_temp_label")[0].value label_length = ( 0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" ")) ) # determine whether tokens in this span already have a label tag_this = True for token in label.span: corresponding_token = sentence.get_token(token.idx - label_length) if corresponding_token is None: tag_this = False continue if token.idx in already_set_indices: tag_this = False continue # only add if all tokens have no label if tag_this: already_set_indices.extend(token.idx for token in label.span) predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span] sentence.add_complex_label( label_name, label=SpanLabel(Span(predicted_span), value=label.value, score=label.score), ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count class TARSClassifier(FewshotClassifier): """ TARS model for text classification. In the backend, the model uses a BERT based binary text classifier which given a <label, text> pair predicts the probability of two classes "True", and "False". The input data is a usual Sentence object which is inflated by the model internally before pushing it through the transformer stack of BERT. """ static_label_type = "tars_label" LABEL_MATCH = "YES" LABEL_NO_MATCH = "NO" def __init__( self, task_name: Optional[str] = None, label_dictionary: Optional[Dictionary] = None, label_type: Optional[str] = None, embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased", num_negative_labels_to_sample: int = 2, prefix: bool = True, **tagger_args, ): """ Initializes a TextClassifier :param task_name: a string depicting the name of the task :param label_dictionary: dictionary of labels you want to predict :param embeddings: name of the pre-trained transformer model e.g., 'bert-base-uncased' etc :param num_negative_labels_to_sample: number of negative labels to sample for each positive labels against a sentence during training. 
Defaults to 2 negative labels for each positive label. The model would sample all the negative labels if None is passed. That slows down the training considerably. :param multi_label: auto-detected by default, but you can set this to True to force multi-label predictionor False to force single-label prediction :param multi_label_threshold: If multi-label you can set the threshold to make predictions :param beta: Parameter for F-beta score for evaluation and training annealing """ super(TARSClassifier, self).__init__() if isinstance(embeddings, str): embeddings = TransformerDocumentEmbeddings( model=embeddings, fine_tune=True, layers="-1", layer_mean=False, ) # prepare TARS dictionary tars_dictionary = Dictionary(add_unk=False) tars_dictionary.add_item(self.LABEL_NO_MATCH) tars_dictionary.add_item(self.LABEL_MATCH) # initialize a bare-bones sequence tagger self.tars_model = TextClassifier( document_embeddings=embeddings, label_dictionary=tars_dictionary, label_type=self.static_label_type, **tagger_args, ) # transformer separator self.separator = str(self.tars_embeddings.tokenizer.sep_token) if self.tars_embeddings.tokenizer._bos_token: self.separator += str(self.tars_embeddings.tokenizer.bos_token) self.prefix = prefix self.num_negative_labels_to_sample = num_negative_labels_to_sample if task_name and label_dictionary and label_type: # Store task specific labels since TARS can handle multiple tasks self.add_and_switch_to_new_task(task_name, label_dictionary, label_type) else: log.info( "TARS initialized without a task. You need to call .add_and_switch_to_new_task() " "before training this model" ) self.clean_up_labels = True def _clean(self, label_value: str) -> str: if self.clean_up_labels: return label_value.replace("_", " ") else: return label_value def _get_tars_formatted_sentence(self, label, sentence): label = self._clean(label) original_text = sentence.to_tokenized_string() label_text_pair = ( f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}" ) sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())] tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label) return tars_sentence def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "current_task": self._current_task, "label_type": self.get_current_label_type(), "label_dictionary": self.get_current_label_dictionary(), "tars_model": self.tars_model, "num_negative_labels_to_sample": self.num_negative_labels_to_sample, "task_specific_attributes": self._task_specific_attributes, } return model_state @staticmethod def _init_model_with_state_dict(state): # init new TARS classifier label_dictionary = state["label_dictionary"] label_type = "default_label" if not state["label_type"] else state["label_type"] model: TARSClassifier = TARSClassifier( task_name=state["current_task"], label_dictionary=label_dictionary, label_type=label_type, embeddings=state["tars_model"].document_embeddings, num_negative_labels_to_sample=state["num_negative_labels_to_sample"], ) # set all task information model._task_specific_attributes = state["task_specific_attributes"] # linear layers of internal classifier model.load_state_dict(state["state_dict"]) return model @staticmethod def _fetch_model(model_name) -> str: model_map = {} hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models" 
model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"]) cache_dir = Path("models") if model_name in model_map: model_name = cached_path(model_map[model_name], cache_dir=cache_dir) return model_name @property def tars_embeddings(self): return self.tars_model.document_embeddings def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, return_probabilities_for_all_classes: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", label_threshold: float = 0.5, multi_label: Optional[bool] = None, ): """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. """ if label_name is None: label_name = self.get_current_label_type() if multi_label is None: multi_label = self.is_current_task_multi_label() # with torch.no_grad(): if not sentences: return sentences if isinstance(sentences, Sentence): sentences = [sentences] # set context if not set already previous_sentence = None for sentence in sentences: if sentence.is_context_set(): continue sentence._previous_sentence = previous_sentence sentence._next_sentence = None if previous_sentence: previous_sentence._next_sentence = sentence previous_sentence = sentence reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True) dataloader = DataLoader( dataset=FlairDatapointDataset(reordered_sentences), batch_size=mini_batch_size, ) # progress bar for verbosity if verbose: progressbar = tqdm(dataloader) progressbar.set_description("Batch inference") dataloader = progressbar overall_loss = 0 overall_count = 0 batch_no = 0 with torch.no_grad(): for batch in dataloader: batch_no += 1 batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue # go through each sentence in the batch for sentence in batch: # always remove tags first sentence.remove_labels(label_name) all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item] best_label = None for label in all_labels: tars_sentence = self._get_tars_formatted_sentence(label, sentence) loss_and_count = self.tars_model.predict( tars_sentence, label_name=label_name, return_loss=True, return_probabilities_for_all_classes=True if label_threshold < 0.5 else False, ) overall_loss += loss_and_count[0].item() overall_count += loss_and_count[1] # add all labels that according to TARS match the text and are above threshold for predicted_tars_label in tars_sentence.get_labels(label_name): if ( predicted_tars_label.value == self.LABEL_MATCH and predicted_tars_label.score > label_threshold ): # do not add labels below confidence threshold sentence.add_label(label_name, label, predicted_tars_label.score) # only use label with highest confidence if enforcing single-label predictions if not 
multi_label: if len(sentence.get_labels(label_name)) > 0: # get all label scores and do an argmax to get the best label label_scores = torch.tensor( [label.score for label in sentence.get_labels(label_name)], dtype=torch.float, ) best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)] # remove previously added labels and only add the best label sentence.remove_labels(label_name) sentence.add_label( typename=label_name, value=best_label.value, score=best_label.score, ) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss, overall_count
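For orientation, here is a minimal zero-shot usage sketch for the classifier above. It is not part of the dataset row; it assumes the Flair package layout this file ships in (flair.data, flair.models) and that the "tars-base" checkpoint wired into _fetch_model above is downloadable.

from flair.data import Sentence
from flair.models import TARSClassifier

# load the pre-trained zero-shot model; "tars-base" is resolved by _fetch_model above
tars = TARSClassifier.load("tars-base")

sentence = Sentence("I am so glad you liked it!")

# predict_zero_shot builds a temporary "ZeroShot" task from the candidate
# labels, predicts, then restores the previously active task (see the
# try/finally block in predict_zero_shot above)
tars.predict_zero_shot(sentence, candidate_label_set={"happy", "sad"})
print(sentence.get_labels())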
__repr__
Returns a string representation of this Point.
For each coordinate (x and y), the representation:
  - Uses no decimal points if the number is close to an integer,
  - Else it uses 2 decimal places after the decimal point.
Examples:
   Point(10, 3.14)
   Point(3.01, 2.99)
""" A simple Line class. NOTE: This is NOT rosegraphics -- it is your OWN Line class. Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher, Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues, and Jacob Jarski. """ # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE. import math import m1t_test_Line as m1t ############################################################################### # IMPORTANT: # Your instructor will help you get started on this exercise. ############################################################################### # ----------------------------------------------------------------------------- # DONE: 2. Right-click on the src folder and # Mark Directory as ... Sources Root, # if you have not already done so. # # Then, with your instructor, READ THE INSTRUCTIONS in file # m0_INSTRUCTIONS.txt # asking questions as needed. Once you understand the instructions, # mark this _TODO_ as DONE. # ----------------------------------------------------------------------------- ############################################################################### # NOTE: For ALL of the methods that you implement, the method is allowed # to have additional side effects as needed by it and/or other methods. ############################################################################### def main(): """ Calls the TEST functions in this module, but ONLY if the method to be tested has at least a partial implementation. That is, a TEST function will not be called until you begin work on the code that it is testing. """ if m1t.is_implemented('__init__'): run_test_init() if m1t.is_implemented('clone'): run_test_clone() if m1t.is_implemented('reverse'): run_test_reverse() if m1t.is_implemented('slope'): run_test_slope() if m1t.is_implemented('length'): run_test_length() if m1t.is_implemented('get_number_of_clones'): run_test_get_number_of_clones() if m1t.is_implemented('line_plus'): run_test_line_plus() if m1t.is_implemented('line_minus'): run_test_line_minus() if m1t.is_implemented('midpoint'): run_test_midpoint() if m1t.is_implemented('is_parallel'): run_test_is_parallel() if m1t.is_implemented('reset'): run_test_reset() ############################################################################### # Students: # Do NOT touch the following Point class - it has no TO DO. # Do NOT copy code from the methods in this Point class. # # DO ** READ ** this Point class, # asking questions about any of it that you do not understand. # # DO ** CALL ** methods in this Point class as needed # in implementing and testing the methods of the ** Line ** class. # # IMPORTANT, IMPORTANT, IMPORTANT: # *** In your ** Line ** class methods, you should NEVER have code # *** that a ** Point ** class method could do for you. ############################################################################### # The Point class (and its methods) begins here. ############################################################################### class Point(object): """ Represents a point in 2-dimensional space. """ def __init__(self, x, y): """ Sets instance variables x and y to the given coordinates. """ self.x = x self.y = y # MASKED: __repr__ function (lines 94-119) def __eq__(self, p2): """ Defines == for Points: a == b is equivalent to a.__eq__(b). Treats two numbers as "equal" if they are within 6 decimal places of each other for both x and y coordinates. """ return (round(self.x, 6) == round(p2.x, 6) and round(self.y, 6) == round(p2.y, 6)) def clone(self): """ Returns a new Point at the same (x, y) as this Point. 
""" return Point(self.x, self.y) def distance_from(self, p2): """ Returns the distance this Point is from the given Point. """ dx_squared = (self.x - p2.x) ** 2 dy_squared = (self.y - p2.y) ** 2 return math.sqrt(dx_squared + dy_squared) def halfway_to(self, p2): """ Given another Point object p2, returns a new Point that is half-way between this Point and the given Point (p2). """ return Point((self.x + p2.x) / 2, (self.y + p2.y) / 2) def plus(self, p2): """ Returns a Point whose coordinates are those of this Point PLUS the given Point. For example: p1 = Point(500, 20) p2 = Point(100, 13) p3 = p1.plus(p2) print(p3) would print: Point(600, 33) """ return Point(self.x + p2.x, self.y + p2.y) def minus(self, p2): """ Returns a Point whose coordinates are those of this Point MINUS the given Point. For example: p1 = Point(500, 20) p2 = Point(100, 13) p3 = p1.minus(p2) print(p3) would print: Point(400, 7) """ return Point(self.x - p2.x, self.y - p2.y) ############################################################################### # The Line class (and its methods) begins here. ############################################################################### class Line(object): """ Represents a line segment in 2-dimensional space. """ def __init__(self, start, end): self.start = start.clone() self.originalstart = start.clone() self.end = end.clone() self.originalend = end.clone() self.timescloned = 0 """ What comes in: -- self -- a Point object named start -- a Point object named end where the two Points are to be the initial start and end points, respectively, of this Line. What goes out: Nothing (i.e., None). Side effects: MUTATEs this Line by setting two instance variables named: -- start -- end to CLONES of the two Point arguments, respectively. Other methods must maintain those instance variables as needed so that they always indicate the CURRENT start and end points of this Line. Also, initializes other instance variables as needed by other Line methods. Example: This __init__ method runs when one constructs a Line. So the 3rd of the following statements invokes the __init__ method of this Line class: p1 = Point(30, 17) p2 = Point(50, 80) line = Line(p1, p2) # Causes __init__ to run print(line.start) # Should print Point(30, 17) print(line.end) # Should print Point(50, 80) print(line.start == p1) # Should print True print(line.start is p1) # Should print False Type hints: :type start: Point :type end: Point """ # --------------------------------------------------------------------- # DONE: 3. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def __repr__(self): """ What comes in: -- self What goes out: Returns a string representation of this Line, in the form: Line[(x1, y1), (x2, y2)] Side effects: None. Note: print(BLAH) causes BLAH's __repr__ to be called. BLAH's __repr__ returns a string, which the print function then prints. 
Example: Since the print function calls __repr__ on the object to be printed: p1 = Point(30, 17) p2 = Point(50, 80) line = Line(p1, p2) # Causes __init__ to run # The following statement causes __repr__ to run, # hence should print: Line[(30, 17), (50, 80)] print(line) Type hints: :rtype: str """ # --------------------------------------------------------------------- # We have already implemented this __repr__ function for you. # Do NOT modify it. # --------------------------------------------------------------------- start = repr(self.start).replace('Point', '') end = repr(self.end).replace('Point', '') return 'Line[{}, {}]'.format(start, end) def __eq__(self, line2): """ What comes in: -- self -- a Line object What goes out: Returns True if: this Line's start point is equal to line2's start point AND this Line's end point is equal to line2's end point. Returns False otherwise. Side effects: None. Note: a == b is equivalent to a.__eq__(b). Examples: p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = Line(p1, p2) line3 = Line(p2, p1) print(line1 == line1) # Should print: True print(line1 == line2) # Should print: True print(line1 == line3) # Should print: False line1.start = Point(0, 0) print(line1 == line2) # Should now print: False Type hints: :type line2: Line :rtype: bool """ # --------------------------------------------------------------------- # We have already implemented this __eq__ function for you. # Do NOT modify it. # --------------------------------------------------------------------- return (self.start == line2.start) and (self.end == line2.end) def clone(self): self.timescloned = self.timescloned + 1 clone = Line(self.start, self.end) return clone """ What comes in: -- self What goes out: Returns a new Line whose START is a clone of this Line's START and whose END is a clone of this Line's END. Side effects: None. Example: p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = line1.clone() print(line1) # Should print: Line[(30, 17), (50, 80)] print(line2) # Should print: Line[(30, 17), (50, 80)] print(line1 == line2) # Should print: True print(line1 is line2) # Should print: False print(line1.start is line2.start) # Should print: False print(line1.end is line2.end) # Should print: False line1.start = Point(11, 12) print(line1) # Should print: Line[(11, 12), (50, 80)] print(line2) # Should print: Line[(30, 17), (50, 80)] print(line1 == line2) # Should now print: False Type hints: :rtype: Line """ # --------------------------------------------------------------------- # DONE: 4. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def reverse(self): reversestart = self.end reverseend = self.start self.start = reversestart self.end = reverseend """ What comes in: -- self What goes out: Nothing (i.e., None). Side effects: MUTATES this Line so that its direction is reversed (that is, its start and end points are swapped). ** Must NOT mutate its start and end points -- just SWAP them. 
** Examples: p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = line1.clone() print(line1) # Should print: Line[(30, 17), (50, 80)] line1.reverse() print(line1) # Should print: Line[(50, 80), (30, 17)] print(line1 == line2) # Should print: False line1.reverse() print(line1 == line2) # Should now print: True """ # --------------------------------------------------------------------- # DONE: 5. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def slope(self): slopex = (self.end.x-self.start.x) slopey = (self.end.y-self.start.y) if slopex == 0: return math.inf else: return slopey/slopex """ What comes in: -- self What goes out: Returns the slope of this Line, or math.inf if the line is vertical (i.e., has "infinite" slope). Side effects: None. Examples: p1 = Point(30, 3) p2 = Point(50, 8) line1 = Line(p1, p2) # Since the slope is (8 - 3) / (50 - 30) , which is 0.25: print(line1.slope()) # Should print [approximately]: 0.25 line2 = Line(Point(10, 10), Point(10, 5)) print(line2.slope()) # Should print: inf # math.inf is NOT the STRING 'inf', so: print(line2.slope() == 'inf') # Should print False Type hints: :rtype: float """ # --------------------------------------------------------------------- # DONE: 6. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def length(self): length = math.sqrt(((self.start.x- self.end.x) ** 2) + ((self.start.y - self.end.y) ** 2)) return length """ What comes in: -- self What goes out: Returns the length of this Line. Side effects: None. Example: p1 = Point(166, 10) p2 = Point(100, 10) line1 = Line(p1, p2) # Since the distance from p1 to p2 is 66: print(line1.length()) # Should print: 66.0 p3 = Point(0, 0) p4 = Point(3, 4) line2 = Line(p3, p4) print(line2.length()) # Should print about 5.0 Type hints: :rtype: float """ # --------------------------------------------------------------------- # DONE: 7. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def get_number_of_clones(self): return self.timescloned """ What comes in: -- self What goes out: -- Returns the number of times that this Line has been cloned (via the clone method). Side effects: None. 
Example: line1 = Line(Point(500, 20), Point(100, 8)) line2 = line1.clone() line3 = line1.clone() line4 = line3.clone() line5 = line1.clone() print(line1.get_number_of_clones()) print(line2.get_number_of_clones()) print(line3.get_number_of_clones()) print(line4.get_number_of_clones()) print(line5.get_number_of_clones()) would print: 3 [since there are three line1.clone() statements] 0 [since there are no line2.clone() statements] 1 [since there is one line3.clone() statement] 0 [since there are no line4.clone() statements] 0 [since there are no line5.clone() statements] Type hints: :rtype: int: """ # --------------------------------------------------------------------- # DONE: 8. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def line_plus(self, other_line): """ What comes in: -- self -- another Line object What goes out: -- Returns a Line whose: -- start is the sum of this Line's start (a Point) and the other_line's start (another Point). -- end is the sum of this Line's end (a Point) and the other_line's end (another Point). Side effects: None. Example: line1 = Line(Point(500, 20), Point(100, 8)) line2 = Line(Point(100, 13), Point(400, 8)) line3 = line1.line_plus(line2) print(line3) would print: Line[(600, 33), (500, 16)] Type hints: :type other_line: Line :rtype: Line: """ # --------------------------------------------------------------------- # DONE: 9. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- start = Point(self.start.x + other_line.start.x, self.start.y + other_line.start.y) end = Point(self.end.x + other_line.end.x, self.end.y + other_line.end.y) line_plus = Line(start, end) return line_plus def line_minus(self, other_line): """ What comes in: -- self -- another Line object What goes out: -- Returns a Line whose: -- start is this Line's start (a Point) minus the other_line's start (another Point). -- end is this Line's end (a Point) minus the other_line's end (another Point). Side effects: None. Example: line1 = Line(Point(500, 20), Point(100, 8)) line2 = Line(Point(100, 13), Point(400, 8)) line3 = line1.line_minus(line2) print(line3) would print: Line[(400, 7), (-300, 0)] Type hints: :type other_line: Line :rtype: Line: """ # --------------------------------------------------------------------- # DONE: 10. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- start = Point(self.start.x - other_line.start.x, self.start.y - other_line.start.y) end = Point(self.end.x - other_line.end.x, self.end.y - other_line.end.y) line_minus = Line(start, end) return line_minus def midpoint(self): """ What comes in: -- self What goes out: returns a Point at the midpoint of this Line. Side effects: None. 
Example: p1 = Point(3, 10) p2 = Point(9, 20) line1 = Line(p1, p2) print(line1.midpoint()) # Should print: Point(6, 15) Type hints: :rtype: Point """ # --------------------------------------------------------------------- # DONE: 11. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- midpoint = Point((self.end.x + self.start.x)/2, (self.end.y + self.start.y)/2) return midpoint def is_parallel(self, line2): """ What comes in: -- self -- another Line object (line2) What goes out: Returns True if this Line is parallel to the given Line (line2). Returns False otherwise. *** SEE THE IMPORTANT NOTE BELOW, re ROUNDING numbers. Side effects: None. Examples: line1 = Line(Point(15, 30), Point(17, 50)) # slope is 10.0 line2 = Line(Point(10, 10), Point(15, 60)) # slope is 10.0 line3 = Line(Point(10, 10), Point(80, 80)) # slope is 7.0 line4 = Line(Point(10, 10), Point(10, 20)) # slope is inf print(line1.is_parallel(line2)) # Should print: True print(line2.is_parallel(line1)) # Should print: True print(line1.is_parallel(line3)) # Should print: False print(line1.is_parallel(line4)) # Should print: False print(line1.is_parallel(line1)) # Should print: True print(line4.is_parallel(line4)) # Should print: True Type hints: :type line2: Line :rtype: bool """ selfslopex = (self.end.x - self.start.x) line2slopex = (line2.end.x - line2.start.x) if line2slopex == 0: if line2slopex == selfslopex: return True else: return False if selfslopex == 0: return False selfslope =((self.end.y - self.start.y)/(self.end.x - self.start.x)) line2slope = ((line2.end.y - line2.start.y)/ (line2.end.x - line2.start.x)) if round(line2slope, 10) == round(selfslope, 10): return True else: return False # --------------------------------------------------------------------- # DONE: 12. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- ####################################################################### # # IMPORTANT: When you test whether two FLOATING POINT numbers # are "equal", you must deal with the imprecision # of floating-point arithmetic. For example, in REAL arithmetic, # 1 / (24 * math.pi - 20 * math.pi) # and # 3 / (72 * math.pi - 60 * math.pi) # are equal. But in FLOATING point arithmetic, they are: # 0.07957747154594767 # and # 0.07957747154594765 # respectively (hence NOT equal). # Try it out if you don't believe me! # ####################################################################### # IMPORTANT BOTTOM-LINE: When you want to test whether two # FLOATING POINT numbers a and b are the same, as in this method, # DON'T use: a == b # INSTEAD use: round(a, 12) == round(b, 12) ######################################################################## # # The latter compares the numbers rounded to 12 decimal places. # In the context of this exercise, doing so is adequate to ignore # floating-point errors while distinguishing numbers that really # are different from each other. 
####################################################################### def reset(self): self.start = self.originalstart self.end = self.originalend """ What comes in: -- self What goes out: Nothing (i.e., None). Side effects: MUTATES this Line so that its start and end points revert to what they were when this Line was constructed. Examples: p1 = Point(-3, -4) p2 = Point(3, 4) line1 = Line(p1, p2) line2 = Line(Point(0, 1), Point(10, 20)) ... [various actions, including some like these:] line1.start = Point(100, 300) line2.end = Point(99, 4) line1.reverse() # Should print: Line[(x1, y1), (x2, y2)] where (x1, y1) and # (x2, y2) are the CURRENT coordinates of line1's endpoints. print(line1) print(line2) # Similarly for line2 line1.reset() line2.reset() print(line1) # Should print: Line[(-3, -4), (3, 4)] print(line2) # Should print: Line[(0, 1), (10, 20)] """ # --------------------------------------------------------------------- # DONE: 13. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- ############################################################################### # The TEST functions for the Line class begin here. # # We have already written the TEST functions. They all take the form: # -- m1t.run_test_BLAH() # This runs OUR tests. # -- One more test (or set of tests) that came directly from the Example # in the specification. ############################################################################### def run_test_init(): """ Tests the __init__ method of the Line class. """ m1t.run_test_init() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(30, 17) p2 = Point(50, 80) line = Line(p1, p2) # Causes __init__ to run print(line.start) # Should print Point(30, 17) print(line.end) # Should print Point(50, 80) print(line.start == p1) # Should print True print(line.start is p1) # Should print False print('The above should print:') print(' Point(30, 17)') print(' Point(50, 80)') print(' True') print(' False') def run_test_clone(): """ Tests the clone method of the Line class. """ m1t.run_test_clone() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). 
# ------------------------------------------------------------------------- p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = line1.clone() print(line1) # Should print: Line[(30, 17), (50, 80)] print(line2) # Should print: Line[(30, 17), (50, 80)] print(line1 == line2) # Should print: True print(line1 is line2) # Should print: False print(line1.start is line2.start) # Should print: False print(line1.end is line2.end) # Should print: False line1.start = Point(11, 12) print(line1) # Should print: Line[(11, 12), (50, 80)] print(line2) # Should print: Line[(30, 17), (50, 80)] print(line1 == line2) # Should now print: False print('The above should print:') print(' Line[(30, 17), (50, 80)]') print(' Line[(30, 17), (50, 80)]') print(' True') print(' False') print(' False') print(' False') print(' Line[(11, 12), (50, 80)]') print(' Line[(30, 17), (50, 80)') print(' False') def run_test_reverse(): """ Tests the reverse method of the Line class. """ m1t.run_test_reverse() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = line1.clone() print(line1) # Should print: Line[(30, 17), (50, 80)] line1.reverse() print(line1) # Should print: Line[(50, 80), (30, 17)] print(line1 == line2) # Should print: False line1.reverse() print(line1 == line2) # Should now print: True print('The above should print:') print(' Line[(30, 17), (50, 80)]') print(' Line[(50, 80), (30, 17)') print(' False') print(' True') def run_test_slope(): """ Tests the slope method of the Line class. """ m1t.run_test_slope() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(30, 3) p2 = Point(50, 8) line1 = Line(p1, p2) # Since the slope is (8 - 3) / (50 - 30) , which is 0.25: print(line1.slope()) # Should print [approximately]: 0.25 line2 = Line(Point(10, 10), Point(10, 5)) print(line2.slope()) # Should print: inf # math.inf is NOT the STRING 'inf', so: print(line2.slope() == 'inf') # Should print False print('The above should print:') print(' 0.25 (approximately)') print(' inf') print(' False') def run_test_length(): """ Tests the length method of the Line class. """ m1t.run_test_length() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(166, 10) p2 = Point(100, 10) line1 = Line(p1, p2) # Since the distance from p1 to p2 is 66: print(line1.length()) # Should print: 66.0 p3 = Point(0, 0) p4 = Point(3, 4) line2 = Line(p3, p4) print(line2.length()) # Should print about 5.0 print('The above should print:') print(' 66.0') print(' 5.0 (approximately)') def run_test_get_number_of_clones(): """ Tests the get_number_of_clones method of the Line class. """ m1t.run_test_get_number_of_clones() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). 
# ------------------------------------------------------------------------- line1 = Line(Point(500, 20), Point(100, 8)) line2 = line1.clone() line3 = line1.clone() line4 = line3.clone() line5 = line1.clone() print(line1.get_number_of_clones()) print(line2.get_number_of_clones()) print(line3.get_number_of_clones()) print(line4.get_number_of_clones()) print(line5.get_number_of_clones()) print('The above should print 3, then 0, then 1, then 0, then 0.') def run_test_line_plus(): """ Tests the line_plus method of the Line class. """ m1t.run_test_line_plus() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- line1 = Line(Point(500, 20), Point(100, 8)) line2 = Line(Point(100, 13), Point(400, 8)) line3 = line1.line_plus(line2) print(line3) print('The above should print: Line[(600, 33), (500, 16)]') def run_test_line_minus(): """ Tests the line_minus method of the Line class. """ m1t.run_test_line_minus() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- line1 = Line(Point(500, 20), Point(100, 8)) line2 = Line(Point(100, 13), Point(400, 8)) line3 = line1.line_minus(line2) print(line3) print('The above should print: Line[(400, 7), (-300, 0)]') def run_test_midpoint(): """ Tests the midpoint method of the Line class. """ m1t.run_test_midpoint() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(3, 10) p2 = Point(9, 20) line1 = Line(p1, p2) print(line1.midpoint()) # Should print: Point(6, 15) print('The above should print: Point(6, 15)') def run_test_is_parallel(): """ Tests the is_parallel method of the Line class. """ m1t.run_test_is_parallel() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- line1 = Line(Point(15, 30), Point(17, 50)) # slope is 10.0 line2 = Line(Point(10, 10), Point(15, 60)) # slope is 10.0 line3 = Line(Point(10, 10), Point(80, 80)) # slope is 7.0 line4 = Line(Point(10, 10), Point(10, 20)) # slope is inf print(line1.is_parallel(line2)) # Should print: True print(line2.is_parallel(line1)) # Should print: True print(line1.is_parallel(line3)) # Should print: False print(line1.is_parallel(line4)) # Should print: False print(line1.is_parallel(line1)) # Should print: True print(line4.is_parallel(line4)) # Should print: True print('The above should print:') print(' True, True, False, False, True, True') def run_test_reset(): """ Tests the reset method of the Line class. """ m1t.run_test_reset() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(-3, -4) p2 = Point(3, 4) line1 = Line(p1, p2) line2 = Line(Point(0, 1), Point(10, 20)) line1.start = Point(100, 300) line2.end = Point(99, 4) line1.reverse() # Should print: Line[(x1, y1), (x2, y2)] where (x1, y1) and # (x2, y2) are the CURRENT coordinates of line1's endpoints. 
print(line1) print(line2) # Similarly for line2 line1.reset() line2.reset() print(line1) # Should print: Line[(-3, -4), (3, 4)] print(line2) # Should print: Line[(0, 1), (10, 20)] print('The above should print:') print(' Line[(3, 4), (100, 300)]') print(' Line[(0, 1), (99, 4)]') print(' Line[(-3, -4), (3, 4)]') print(' Line[(0, 1), (10, 20)]') # ----------------------------------------------------------------------------- # If this module is running at the top level (as opposed to being # imported by another module), then call the 'main' function. # It is necessary here to enable the automatic testing in m1t_test_Line.py. # ----------------------------------------------------------------------------- if __name__ == '__main__': main()
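As a quick sanity check of the floating-point caveat in the is_parallel notes above, this tiny snippet (hypothetical, plain Python 3) reruns the two expressions from the comment:

import math

a = 1 / (24 * math.pi - 20 * math.pi)
b = 3 / (72 * math.pi - 60 * math.pi)
print(a == b)                        # False: the two floats differ in the last digit
print(round(a, 12) == round(b, 12))  # True: equal once rounded to 12 decimal places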
    def __repr__(self):
        """
        Returns a string representation of this Point.
        For each coordinate (x and y), the representation:
          - Uses no decimal points if the number is close to an integer,
          - Else it uses 2 decimal places after the decimal point.
        Examples:
           Point(10, 3.14)
           Point(3.01, 2.99)
        """
        decimal_places = 2  # Use 2 places after the decimal point

        formats = []
        numbers = []
        for coordinate in (self.x, self.y):
            if abs(coordinate - round(coordinate)) < (10 ** -decimal_places):
                # Treat it as an integer:
                formats.append('{}')
                numbers.append(round(coordinate))
            else:
                # Treat it as a float to decimal_places decimal places:
                formats.append('{:.' + str(decimal_places) + 'f}')
                numbers.append(round(coordinate, decimal_places))

        format_string = 'Point(' + formats[0] + ', ' + formats[1] + ')'
        return format_string.format(numbers[0], numbers[1])
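A quick check of the two docstring examples (hypothetical snippet, assuming the Point class defined in the masked file above):

p = Point(10, 3.14)
print(p)                  # Point(10, 3.14): x is within 0.01 of an integer, y is not
print(Point(3.01, 2.99))  # Point(3.01, 2.99): both coordinates keep 2 decimal places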
94
119
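The start_line/end_line pair above (94 and 119, 1-indexed and inclusive) locates the masked span within the file. A hypothetical helper to splice the reference implementation back over the '# MASKED: ...' placeholder shown in masked_code; the placeholder convention is as displayed above, everything else here is an assumption:

import re

def unmask(masked_code: str, implementation: str) -> str:
    # Replace the single "# MASKED: ..." placeholder line with the reference
    # implementation; the implementation string carries its own indentation.
    return re.sub(r"[ \t]*# MASKED:[^\n]*", lambda _: implementation, masked_code, count=1)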
""" A simple Line class. NOTE: This is NOT rosegraphics -- it is your OWN Line class. Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher, Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues, and Jacob Jarski. """ # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE. import math import m1t_test_Line as m1t ############################################################################### # IMPORTANT: # Your instructor will help you get started on this exercise. ############################################################################### # ----------------------------------------------------------------------------- # DONE: 2. Right-click on the src folder and # Mark Directory as ... Sources Root, # if you have not already done so. # # Then, with your instructor, READ THE INSTRUCTIONS in file # m0_INSTRUCTIONS.txt # asking questions as needed. Once you understand the instructions, # mark this _TODO_ as DONE. # ----------------------------------------------------------------------------- ############################################################################### # NOTE: For ALL of the methods that you implement, the method is allowed # to have additional side effects as needed by it and/or other methods. ############################################################################### def main(): """ Calls the TEST functions in this module, but ONLY if the method to be tested has at least a partial implementation. That is, a TEST function will not be called until you begin work on the code that it is testing. """ if m1t.is_implemented('__init__'): run_test_init() if m1t.is_implemented('clone'): run_test_clone() if m1t.is_implemented('reverse'): run_test_reverse() if m1t.is_implemented('slope'): run_test_slope() if m1t.is_implemented('length'): run_test_length() if m1t.is_implemented('get_number_of_clones'): run_test_get_number_of_clones() if m1t.is_implemented('line_plus'): run_test_line_plus() if m1t.is_implemented('line_minus'): run_test_line_minus() if m1t.is_implemented('midpoint'): run_test_midpoint() if m1t.is_implemented('is_parallel'): run_test_is_parallel() if m1t.is_implemented('reset'): run_test_reset() ############################################################################### # Students: # Do NOT touch the following Point class - it has no TO DO. # Do NOT copy code from the methods in this Point class. # # DO ** READ ** this Point class, # asking questions about any of it that you do not understand. # # DO ** CALL ** methods in this Point class as needed # in implementing and testing the methods of the ** Line ** class. # # IMPORTANT, IMPORTANT, IMPORTANT: # *** In your ** Line ** class methods, you should NEVER have code # *** that a ** Point ** class method could do for you. ############################################################################### # The Point class (and its methods) begins here. ############################################################################### class Point(object): """ Represents a point in 2-dimensional space. """ def __init__(self, x, y): """ Sets instance variables x and y to the given coordinates. """ self.x = x self.y = y def __repr__(self): """ Returns a string representation of this Point. For each coordinate (x and y), the representation: - Uses no decimal points if the number is close to an integer, - Else it uses 2 decimal places after the decimal point. 
Examples: Point(10, 3.14) Point(3.01, 2.99) """ decimal_places = 2 # Use 2 places after the decimal point formats = [] numbers = [] for coordinate in (self.x, self.y): if abs(coordinate - round(coordinate)) < (10 ** -decimal_places): # Treat it as an integer: formats.append('{}') numbers.append(round(coordinate)) else: # Treat it as a float to decimal_places decimal places: formats.append('{:.' + str(decimal_places) + 'f}') numbers.append(round(coordinate, decimal_places)) format_string = 'Point(' + formats[0] + ', ' + formats[1] + ')' return format_string.format(numbers[0], numbers[1]) def __eq__(self, p2): """ Defines == for Points: a == b is equivalent to a.__eq__(b). Treats two numbers as "equal" if they are within 6 decimal places of each other for both x and y coordinates. """ return (round(self.x, 6) == round(p2.x, 6) and round(self.y, 6) == round(p2.y, 6)) def clone(self): """ Returns a new Point at the same (x, y) as this Point. """ return Point(self.x, self.y) def distance_from(self, p2): """ Returns the distance this Point is from the given Point. """ dx_squared = (self.x - p2.x) ** 2 dy_squared = (self.y - p2.y) ** 2 return math.sqrt(dx_squared + dy_squared) def halfway_to(self, p2): """ Given another Point object p2, returns a new Point that is half-way between this Point and the given Point (p2). """ return Point((self.x + p2.x) / 2, (self.y + p2.y) / 2) def plus(self, p2): """ Returns a Point whose coordinates are those of this Point PLUS the given Point. For example: p1 = Point(500, 20) p2 = Point(100, 13) p3 = p1.plus(p2) print(p3) would print: Point(600, 33) """ return Point(self.x + p2.x, self.y + p2.y) def minus(self, p2): """ Returns a Point whose coordinates are those of this Point MINUS the given Point. For example: p1 = Point(500, 20) p2 = Point(100, 13) p3 = p1.minus(p2) print(p3) would print: Point(400, 7) """ return Point(self.x - p2.x, self.y - p2.y) ############################################################################### # The Line class (and its methods) begins here. ############################################################################### class Line(object): """ Represents a line segment in 2-dimensional space. """ def __init__(self, start, end): self.start = start.clone() self.originalstart = start.clone() self.end = end.clone() self.originalend = end.clone() self.timescloned = 0 """ What comes in: -- self -- a Point object named start -- a Point object named end where the two Points are to be the initial start and end points, respectively, of this Line. What goes out: Nothing (i.e., None). Side effects: MUTATEs this Line by setting two instance variables named: -- start -- end to CLONES of the two Point arguments, respectively. Other methods must maintain those instance variables as needed so that they always indicate the CURRENT start and end points of this Line. Also, initializes other instance variables as needed by other Line methods. Example: This __init__ method runs when one constructs a Line. So the 3rd of the following statements invokes the __init__ method of this Line class: p1 = Point(30, 17) p2 = Point(50, 80) line = Line(p1, p2) # Causes __init__ to run print(line.start) # Should print Point(30, 17) print(line.end) # Should print Point(50, 80) print(line.start == p1) # Should print True print(line.start is p1) # Should print False Type hints: :type start: Point :type end: Point """ # --------------------------------------------------------------------- # DONE: 3. # a. READ the above specification, including the Example. 
# ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def __repr__(self): """ What comes in: -- self What goes out: Returns a string representation of this Line, in the form: Line[(x1, y1), (x2, y2)] Side effects: None. Note: print(BLAH) causes BLAH's __repr__ to be called. BLAH's __repr__ returns a string, which the print function then prints. Example: Since the print function calls __repr__ on the object to be printed: p1 = Point(30, 17) p2 = Point(50, 80) line = Line(p1, p2) # Causes __init__ to run # The following statement causes __repr__ to run, # hence should print: Line[(30, 17), (50, 80)] print(line) Type hints: :rtype: str """ # --------------------------------------------------------------------- # We have already implemented this __repr__ function for you. # Do NOT modify it. # --------------------------------------------------------------------- start = repr(self.start).replace('Point', '') end = repr(self.end).replace('Point', '') return 'Line[{}, {}]'.format(start, end) def __eq__(self, line2): """ What comes in: -- self -- a Line object What goes out: Returns True if: this Line's start point is equal to line2's start point AND this Line's end point is equal to line2's end point. Returns False otherwise. Side effects: None. Note: a == b is equivalent to a.__eq__(b). Examples: p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = Line(p1, p2) line3 = Line(p2, p1) print(line1 == line1) # Should print: True print(line1 == line2) # Should print: True print(line1 == line3) # Should print: False line1.start = Point(0, 0) print(line1 == line2) # Should now print: False Type hints: :type line2: Line :rtype: bool """ # --------------------------------------------------------------------- # We have already implemented this __eq__ function for you. # Do NOT modify it. # --------------------------------------------------------------------- return (self.start == line2.start) and (self.end == line2.end) def clone(self): self.timescloned = self.timescloned + 1 clone = Line(self.start, self.end) return clone """ What comes in: -- self What goes out: Returns a new Line whose START is a clone of this Line's START and whose END is a clone of this Line's END. Side effects: None. Example: p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = line1.clone() print(line1) # Should print: Line[(30, 17), (50, 80)] print(line2) # Should print: Line[(30, 17), (50, 80)] print(line1 == line2) # Should print: True print(line1 is line2) # Should print: False print(line1.start is line2.start) # Should print: False print(line1.end is line2.end) # Should print: False line1.start = Point(11, 12) print(line1) # Should print: Line[(11, 12), (50, 80)] print(line2) # Should print: Line[(30, 17), (50, 80)] print(line1 == line2) # Should now print: False Type hints: :rtype: Line """ # --------------------------------------------------------------------- # DONE: 4. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. 
# --------------------------------------------------------------------- def reverse(self): reversestart = self.end reverseend = self.start self.start = reversestart self.end = reverseend """ What comes in: -- self What goes out: Nothing (i.e., None). Side effects: MUTATES this Line so that its direction is reversed (that is, its start and end points are swapped). ** Must NOT mutate its start and end points -- just SWAP them. ** Examples: p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = line1.clone() print(line1) # Should print: Line[(30, 17), (50, 80)] line1.reverse() print(line1) # Should print: Line[(50, 80), (30, 17)] print(line1 == line2) # Should print: False line1.reverse() print(line1 == line2) # Should now print: True """ # --------------------------------------------------------------------- # DONE: 5. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def slope(self): slopex = (self.end.x-self.start.x) slopey = (self.end.y-self.start.y) if slopex == 0: return math.inf else: return slopey/slopex """ What comes in: -- self What goes out: Returns the slope of this Line, or math.inf if the line is vertical (i.e., has "infinite" slope). Side effects: None. Examples: p1 = Point(30, 3) p2 = Point(50, 8) line1 = Line(p1, p2) # Since the slope is (8 - 3) / (50 - 30) , which is 0.25: print(line1.slope()) # Should print [approximately]: 0.25 line2 = Line(Point(10, 10), Point(10, 5)) print(line2.slope()) # Should print: inf # math.inf is NOT the STRING 'inf', so: print(line2.slope() == 'inf') # Should print False Type hints: :rtype: float """ # --------------------------------------------------------------------- # DONE: 6. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def length(self): length = math.sqrt(((self.start.x- self.end.x) ** 2) + ((self.start.y - self.end.y) ** 2)) return length """ What comes in: -- self What goes out: Returns the length of this Line. Side effects: None. Example: p1 = Point(166, 10) p2 = Point(100, 10) line1 = Line(p1, p2) # Since the distance from p1 to p2 is 66: print(line1.length()) # Should print: 66.0 p3 = Point(0, 0) p4 = Point(3, 4) line2 = Line(p3, p4) print(line2.length()) # Should print about 5.0 Type hints: :rtype: float """ # --------------------------------------------------------------------- # DONE: 7. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def get_number_of_clones(self): return self.timescloned """ What comes in: -- self What goes out: -- Returns the number of times that this Line has been cloned (via the clone method). Side effects: None. 
Example: line1 = Line(Point(500, 20), Point(100, 8)) line2 = line1.clone() line3 = line1.clone() line4 = line3.clone() line5 = line1.clone() print(line1.get_number_of_clones()) print(line2.get_number_of_clones()) print(line3.get_number_of_clones()) print(line4.get_number_of_clones()) print(line5.get_number_of_clones()) would print: 3 [since there are three line1.clone() statements] 0 [since there are no line2.clone() statements] 1 [since there is one line3.clone() statement] 0 [since there are no line4.clone() statements] 0 [since there are no line5.clone() statements] Type hints: :rtype: int: """ # --------------------------------------------------------------------- # DONE: 8. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- def line_plus(self, other_line): """ What comes in: -- self -- another Line object What goes out: -- Returns a Line whose: -- start is the sum of this Line's start (a Point) and the other_line's start (another Point). -- end is the sum of this Line's end (a Point) and the other_line's end (another Point). Side effects: None. Example: line1 = Line(Point(500, 20), Point(100, 8)) line2 = Line(Point(100, 13), Point(400, 8)) line3 = line1.line_plus(line2) print(line3) would print: Line[(600, 33), (500, 16)] Type hints: :type other_line: Line :rtype: Line: """ # --------------------------------------------------------------------- # DONE: 9. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- start = Point(self.start.x + other_line.start.x, self.start.y + other_line.start.y) end = Point(self.end.x + other_line.end.x, self.end.y + other_line.end.y) line_plus = Line(start, end) return line_plus def line_minus(self, other_line): """ What comes in: -- self -- another Line object What goes out: -- Returns a Line whose: -- start is this Line's start (a Point) minus the other_line's start (another Point). -- end is this Line's end (a Point) minus the other_line's end (another Point). Side effects: None. Example: line1 = Line(Point(500, 20), Point(100, 8)) line2 = Line(Point(100, 13), Point(400, 8)) line3 = line1.line_minus(line2) print(line3) would print: Line[(400, 7), (-300, 0)] Type hints: :type other_line: Line :rtype: Line: """ # --------------------------------------------------------------------- # DONE: 10. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- start = Point(self.start.x - other_line.start.x, self.start.y - other_line.start.y) end = Point(self.end.x - other_line.end.x, self.end.y - other_line.end.y) line_minus = Line(start, end) return line_minus def midpoint(self): """ What comes in: -- self What goes out: returns a Point at the midpoint of this Line. Side effects: None. 
Example: p1 = Point(3, 10) p2 = Point(9, 20) line1 = Line(p1, p2) print(line1.midpoint()) # Should print: Point(6, 15) Type hints: :rtype: Point """ # --------------------------------------------------------------------- # DONE: 11. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- midpoint = Point((self.end.x + self.start.x)/2, (self.end.y + self.start.y)/2) return midpoint def is_parallel(self, line2): """ What comes in: -- self -- another Line object (line2) What goes out: Returns True if this Line is parallel to the given Line (line2). Returns False otherwise. *** SEE THE IMPORTANT NOTE BELOW, re ROUNDING numbers. Side effects: None. Examples: line1 = Line(Point(15, 30), Point(17, 50)) # slope is 10.0 line2 = Line(Point(10, 10), Point(15, 60)) # slope is 10.0 line3 = Line(Point(10, 10), Point(80, 80)) # slope is 7.0 line4 = Line(Point(10, 10), Point(10, 20)) # slope is inf print(line1.is_parallel(line2)) # Should print: True print(line2.is_parallel(line1)) # Should print: True print(line1.is_parallel(line3)) # Should print: False print(line1.is_parallel(line4)) # Should print: False print(line1.is_parallel(line1)) # Should print: True print(line4.is_parallel(line4)) # Should print: True Type hints: :type line2: Line :rtype: bool """ selfslopex = (self.end.x - self.start.x) line2slopex = (line2.end.x - line2.start.x) if line2slopex == 0: if line2slopex == selfslopex: return True else: return False if selfslopex == 0: return False selfslope =((self.end.y - self.start.y)/(self.end.x - self.start.x)) line2slope = ((line2.end.y - line2.start.y)/ (line2.end.x - line2.start.x)) if round(line2slope, 10) == round(selfslope, 10): return True else: return False # --------------------------------------------------------------------- # DONE: 12. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- ####################################################################### # # IMPORTANT: When you test whether two FLOATING POINT numbers # are "equal", you must deal with the imprecision # of floating-point arithmetic. For example, in REAL arithmetic, # 1 / (24 * math.pi - 20 * math.pi) # and # 3 / (72 * math.pi - 60 * math.pi) # are equal. But in FLOATING point arithmetic, they are: # 0.07957747154594767 # and # 0.07957747154594765 # respectively (hence NOT equal). # Try it out if you don't believe me! # ####################################################################### # IMPORTANT BOTTOM-LINE: When you want to test whether two # FLOATING POINT numbers a and b are the same, as in this method, # DON'T use: a == b # INSTEAD use: round(a, 12) == round(b, 12) ######################################################################## # # The latter compares the numbers rounded to 12 decimal places. # In the context of this exercise, doing so is adequate to ignore # floating-point errors while distinguishing numbers that really # are different from each other. 
####################################################################### def reset(self): self.start = self.originalstart self.end = self.originalend """ What comes in: -- self What goes out: Nothing (i.e., None). Side effects: MUTATES this Line so that its start and end points revert to what they were when this Line was constructed. Examples: p1 = Point(-3, -4) p2 = Point(3, 4) line1 = Line(p1, p2) line2 = Line(Point(0, 1), Point(10, 20)) ... [various actions, including some like these:] line1.start = Point(100, 300) line2.end = Point(99, 4) line1.reverse() # Should print: Line[(x1, y1), (x2, y2)] where (x1, y1) and # (x2, y2) are the CURRENT coordinates of line1's endpoints. print(line1) print(line2) # Similarly for line2 line1.reset() line2.reset() print(line1) # Should print: Line[(-3, -4), (3, 4)] print(line2) # Should print: Line[(0, 1), (10, 20)] """ # --------------------------------------------------------------------- # DONE: 13. # a. READ the above specification, including the Example. # ** ASK QUESTIONS AS NEEDED. ** # ** Be sure you understand it, ESPECIALLY the Example. # b. Implement and test this method. # The tests are already written (below). # They include the Example in the above doc-string. # --------------------------------------------------------------------- ############################################################################### # The TEST functions for the Line class begin here. # # We have already written the TEST functions. They all take the form: # -- m1t.run_test_BLAH() # This runs OUR tests. # -- One more test (or set of tests) that came directly from the Example # in the specification. ############################################################################### def run_test_init(): """ Tests the __init__ method of the Line class. """ m1t.run_test_init() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(30, 17) p2 = Point(50, 80) line = Line(p1, p2) # Causes __init__ to run print(line.start) # Should print Point(30, 17) print(line.end) # Should print Point(50, 80) print(line.start == p1) # Should print True print(line.start is p1) # Should print False print('The above should print:') print(' Point(30, 17)') print(' Point(50, 80)') print(' True') print(' False') def run_test_clone(): """ Tests the clone method of the Line class. """ m1t.run_test_clone() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). 
# ------------------------------------------------------------------------- p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = line1.clone() print(line1) # Should print: Line[(30, 17), (50, 80)] print(line2) # Should print: Line[(30, 17), (50, 80)] print(line1 == line2) # Should print: True print(line1 is line2) # Should print: False print(line1.start is line2.start) # Should print: False print(line1.end is line2.end) # Should print: False line1.start = Point(11, 12) print(line1) # Should print: Line[(11, 12), (50, 80)] print(line2) # Should print: Line[(30, 17), (50, 80)] print(line1 == line2) # Should now print: False print('The above should print:') print(' Line[(30, 17), (50, 80)]') print(' Line[(30, 17), (50, 80)]') print(' True') print(' False') print(' False') print(' False') print(' Line[(11, 12), (50, 80)]') print(' Line[(30, 17), (50, 80)') print(' False') def run_test_reverse(): """ Tests the reverse method of the Line class. """ m1t.run_test_reverse() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(30, 17) p2 = Point(50, 80) line1 = Line(p1, p2) line2 = line1.clone() print(line1) # Should print: Line[(30, 17), (50, 80)] line1.reverse() print(line1) # Should print: Line[(50, 80), (30, 17)] print(line1 == line2) # Should print: False line1.reverse() print(line1 == line2) # Should now print: True print('The above should print:') print(' Line[(30, 17), (50, 80)]') print(' Line[(50, 80), (30, 17)') print(' False') print(' True') def run_test_slope(): """ Tests the slope method of the Line class. """ m1t.run_test_slope() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(30, 3) p2 = Point(50, 8) line1 = Line(p1, p2) # Since the slope is (8 - 3) / (50 - 30) , which is 0.25: print(line1.slope()) # Should print [approximately]: 0.25 line2 = Line(Point(10, 10), Point(10, 5)) print(line2.slope()) # Should print: inf # math.inf is NOT the STRING 'inf', so: print(line2.slope() == 'inf') # Should print False print('The above should print:') print(' 0.25 (approximately)') print(' inf') print(' False') def run_test_length(): """ Tests the length method of the Line class. """ m1t.run_test_length() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(166, 10) p2 = Point(100, 10) line1 = Line(p1, p2) # Since the distance from p1 to p2 is 66: print(line1.length()) # Should print: 66.0 p3 = Point(0, 0) p4 = Point(3, 4) line2 = Line(p3, p4) print(line2.length()) # Should print about 5.0 print('The above should print:') print(' 66.0') print(' 5.0 (approximately)') def run_test_get_number_of_clones(): """ Tests the get_number_of_clones method of the Line class. """ m1t.run_test_get_number_of_clones() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). 
# ------------------------------------------------------------------------- line1 = Line(Point(500, 20), Point(100, 8)) line2 = line1.clone() line3 = line1.clone() line4 = line3.clone() line5 = line1.clone() print(line1.get_number_of_clones()) print(line2.get_number_of_clones()) print(line3.get_number_of_clones()) print(line4.get_number_of_clones()) print(line5.get_number_of_clones()) print('The above should print 3, then 0, then 1, then 0, then 0.') def run_test_line_plus(): """ Tests the line_plus method of the Line class. """ m1t.run_test_line_plus() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- line1 = Line(Point(500, 20), Point(100, 8)) line2 = Line(Point(100, 13), Point(400, 8)) line3 = line1.line_plus(line2) print(line3) print('The above should print: Line[(600, 33), (500, 16)]') def run_test_line_minus(): """ Tests the line_minus method of the Line class. """ m1t.run_test_line_minus() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- line1 = Line(Point(500, 20), Point(100, 8)) line2 = Line(Point(100, 13), Point(400, 8)) line3 = line1.line_minus(line2) print(line3) print('The above should print: Line[(400, 7), (-300, 0)]') def run_test_midpoint(): """ Tests the midpoint method of the Line class. """ m1t.run_test_midpoint() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(3, 10) p2 = Point(9, 20) line1 = Line(p1, p2) print(line1.midpoint()) # Should print: Point(6, 15) print('The above should print: Point(6, 15)') def run_test_is_parallel(): """ Tests the is_parallel method of the Line class. """ m1t.run_test_is_parallel() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- line1 = Line(Point(15, 30), Point(17, 50)) # slope is 10.0 line2 = Line(Point(10, 10), Point(15, 60)) # slope is 10.0 line3 = Line(Point(10, 10), Point(80, 80)) # slope is 7.0 line4 = Line(Point(10, 10), Point(10, 20)) # slope is inf print(line1.is_parallel(line2)) # Should print: True print(line2.is_parallel(line1)) # Should print: True print(line1.is_parallel(line3)) # Should print: False print(line1.is_parallel(line4)) # Should print: False print(line1.is_parallel(line1)) # Should print: True print(line4.is_parallel(line4)) # Should print: True print('The above should print:') print(' True, True, False, False, True, True') def run_test_reset(): """ Tests the reset method of the Line class. """ m1t.run_test_reset() # This runs OUR tests. # ------------------------------------------------------------------------- # One ADDITIONAL test (or set of tests). # ------------------------------------------------------------------------- p1 = Point(-3, -4) p2 = Point(3, 4) line1 = Line(p1, p2) line2 = Line(Point(0, 1), Point(10, 20)) line1.start = Point(100, 300) line2.end = Point(99, 4) line1.reverse() # Should print: Line[(x1, y1), (x2, y2)] where (x1, y1) and # (x2, y2) are the CURRENT coordinates of line1's endpoints. 
print(line1) print(line2) # Similarly for line2 line1.reset() line2.reset() print(line1) # Should print: Line[(-3, -4), (3, 4)] print(line2) # Should print: Line[(0, 1), (10, 20)] print('The above should print:') print(' Line[(3, 4), (100, 300)]') print(' Line[(0, 1), (99, 4)]') print(' Line[(-3, -4), (3, 4)]') print(' Line[(0, 1), (10, 20)]') # ----------------------------------------------------------------------------- # If this module is running at the top level (as opposed to being # imported by another module), then call the 'main' function. # It is necessary here to enable the automatic testing in m1t_test_Line.py. # ----------------------------------------------------------------------------- if __name__ == '__main__': main()
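A side note on the floating-point comparison advice in the is_parallel comment block above: the following is a minimal standalone sketch (not part of any dataset row) of the round-based equality check it recommends, using the two expressions quoted in that comment.

import math

a = 1 / (24 * math.pi - 20 * math.pi)
b = 3 / (72 * math.pi - 60 * math.pi)

# In exact arithmetic both equal 1 / (4 * pi), but floating-point noise
# makes the raw comparison fail while the rounded comparison succeeds.
print(a == b)                        # False
print(round(a, 12) == round(b, 12))  # True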
__init__
:param port: port number to start the redis server on. Specify None to generate one automatically
:type port: int|None
:param extra_args: any extra keyword arguments will be passed to the redis server as --key val
import subprocess import socket import tempfile import redis import time import os import itertools import sys # Environment variable pointing to the redis executable REDIS_PATH_ENVVAR = 'REDIS_PATH' def get_random_port(): sock = socket.socket() sock.listen(0) _, port = sock.getsockname() sock.close() return port class DisposableRedis(object): # MASKED: __init__ function (lines 24-39) def __enter__(self): if self._port is None: self.port = get_random_port() else: self.port = self._port args = [self.path, '--port', str(self.port), '--dir', tempfile.gettempdir(), '--save', ''] + self.extra_args self.process = subprocess.Popen( args, #cwd=os.getcwd(), stdin=subprocess.PIPE, stdout=open(os.devnull, 'w') # stdout=sys.stdout, # env=os.environ.copy() ) while True: try: self.client().ping() break except redis.ConnectionError: self.process.poll() if self.process.returncode is not None: raise RuntimeError("Process has exited") time.sleep(0.1) return self.client() def __exit__(self, exc_type, exc_val, exc_tb): self.process.terminate() def client(self): """ :rtype: redis.StrictRedis """ return redis.StrictRedis(port=self.port, decode_responses=True)
def __init__(self, port=None, path='redis-server', **extra_args): """ :param port: port number to start the redis server on. Specify none to automatically generate :type port: int|None :param extra_args: any extra arguments kwargs will be passed to redis server as --key val """ self._port = port # this will hold the actual port the redis is listening on. It's equal to `_port` unless `_port` is None # in that case `port` is randomly generated self.port = None self.extra_args = list(itertools.chain( *(('--%s'%k, v) for k, v in extra_args.items()) )) self.path = os.getenv(REDIS_PATH_ENVVAR, path)
24
39
import subprocess import socket import tempfile import redis import time import os import itertools import sys # Environment variable pointing to the redis executable REDIS_PATH_ENVVAR = 'REDIS_PATH' def get_random_port(): sock = socket.socket() sock.listen(0) _, port = sock.getsockname() sock.close() return port class DisposableRedis(object): def __init__(self, port=None, path='redis-server', **extra_args): """ :param port: port number to start the redis server on. Specify none to automatically generate :type port: int|None :param extra_args: any extra arguments kwargs will be passed to redis server as --key val """ self._port = port # this will hold the actual port the redis is listening on. It's equal to `_port` unless `_port` is None # in that case `port` is randomly generated self.port = None self.extra_args = list(itertools.chain( *(('--%s'%k, v) for k, v in extra_args.items()) )) self.path = os.getenv(REDIS_PATH_ENVVAR, path) def __enter__(self): if self._port is None: self.port = get_random_port() else: self.port = self._port args = [self.path, '--port', str(self.port), '--dir', tempfile.gettempdir(), '--save', ''] + self.extra_args self.process = subprocess.Popen( args, #cwd=os.getcwd(), stdin=subprocess.PIPE, stdout=open(os.devnull, 'w') # stdout=sys.stdout, # env=os.environ.copy() ) while True: try: self.client().ping() break except redis.ConnectionError: self.process.poll() if self.process.returncode is not None: raise RuntimeError("Process has exited") time.sleep(0.1) return self.client() def __exit__(self, exc_type, exc_val, exc_tb): self.process.terminate() def client(self): """ :rtype: redis.StrictRedis """ return redis.StrictRedis(port=self.port, decode_responses=True)
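A minimal usage sketch for the DisposableRedis helper shown above. It assumes a redis-server binary is available on PATH (or pointed to by the REDIS_PATH environment variable); the import path and the maxmemory keyword are only illustrations of how the class is loaded and how extra_args are forwarded as --key val.

from disposableredis import DisposableRedis  # hypothetical import path for the module above

with DisposableRedis(maxmemory='100mb') as r:
    # __enter__ starts a throwaway redis-server on a free port and returns a StrictRedis client
    r.set('greeting', 'hello')
    print(r.get('greeting'))  # 'hello' (decode_responses=True yields str)
# __exit__ terminates the server process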
_sample_reduce
Reduce function used by the sample and choices functions.

Parameters
----------
reduce_iter : iterable
    Each element is a tuple generated by the _sample_map_partitions function.

Returns a sequence of uniformly distributed samples.
import heapq import math import random as rnd from functools import partial from .core import Bag def sample(population, k): """Chooses k unique random elements from a bag. Returns a new bag containing elements from the population while leaving the original population unchanged. Parameters ---------- population: Bag Elements to sample. k: integer, optional Number of elements to sample. Examples -------- >>> import dask.bag as db # doctest: +SKIP ... from dask.bag import random ... ... b = db.from_sequence(range(5), npartitions=2) ... list(random.sample(b, 3).compute()) [1, 3, 5] """ return _sample(population=population, k=k, replace=False) def choices(population, k=1): """ Return a k sized list of elements chosen with replacement. Parameters ---------- population: Bag Elements to sample. k: integer, optional Number of elements to sample. Examples -------- >>> import dask.bag as db # doctest: +SKIP ... from dask.bag import random ... ... b = db.from_sequence(range(5), npartitions=2) ... list(random.choices(b, 3).compute()) [1, 1, 5] """ return _sample(population=population, k=k, replace=True) def _sample(population, k, replace=False): return population.reduction( partial(_sample_map_partitions, k=k, replace=replace), partial(_sample_reduce, k=k, replace=replace), out_type=Bag, ) def _sample_map_partitions(population, k, replace): """ Map function used on the sample and choices functions. Parameters ---------- population : list List of elements to sample. k : int, optional Number of elements to sample. Default is 1. Returns ------- sample: list List of sampled elements from the partition. lx: int Number of elements on the partition. k: int Number of elements to sample. """ lx = len(population) real_k = k if k <= lx else lx sample_func = rnd.choices if replace else rnd.sample # because otherwise it raises IndexError: sampled = [] if real_k == 0 else sample_func(population=population, k=real_k) return sampled, lx # MASKED: _sample_reduce function (lines 93-126) def _weighted_sampling_without_replacement(population, weights, k): """ Source: Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. Spirakis """ elt = [(math.log(rnd.random()) / weights[i], i) for i in range(len(weights))] return [population[x[1]] for x in heapq.nlargest(k, elt)]
def _sample_reduce(reduce_iter, k, replace): """ Reduce function used on the sample and choice functions. Parameters ---------- reduce_iter : iterable Each element is a tuple coming generated by the _sample_map_partitions function. Returns a sequence of uniformly distributed samples; """ ns_ks = [] s = [] n = 0 # unfolding reduce outputs for i in reduce_iter: (s_i, n_i) = i s.extend(s_i) n += n_i k_i = len(s_i) ns_ks.append((n_i, k_i)) if k < 0 or (k > n and not replace): raise ValueError("Sample larger than population or is negative") # creating the probability array p = [] for n_i, k_i in ns_ks: if k_i > 0: p_i = n_i / (k_i * n) p += [p_i] * k_i sample_func = rnd.choices if replace else _weighted_sampling_without_replacement return sample_func(population=s, weights=p, k=k)
93
126
import heapq import math import random as rnd from functools import partial from .core import Bag def sample(population, k): """Chooses k unique random elements from a bag. Returns a new bag containing elements from the population while leaving the original population unchanged. Parameters ---------- population: Bag Elements to sample. k: integer, optional Number of elements to sample. Examples -------- >>> import dask.bag as db # doctest: +SKIP ... from dask.bag import random ... ... b = db.from_sequence(range(5), npartitions=2) ... list(random.sample(b, 3).compute()) [1, 3, 5] """ return _sample(population=population, k=k, replace=False) def choices(population, k=1): """ Return a k sized list of elements chosen with replacement. Parameters ---------- population: Bag Elements to sample. k: integer, optional Number of elements to sample. Examples -------- >>> import dask.bag as db # doctest: +SKIP ... from dask.bag import random ... ... b = db.from_sequence(range(5), npartitions=2) ... list(random.choices(b, 3).compute()) [1, 1, 5] """ return _sample(population=population, k=k, replace=True) def _sample(population, k, replace=False): return population.reduction( partial(_sample_map_partitions, k=k, replace=replace), partial(_sample_reduce, k=k, replace=replace), out_type=Bag, ) def _sample_map_partitions(population, k, replace): """ Map function used on the sample and choices functions. Parameters ---------- population : list List of elements to sample. k : int, optional Number of elements to sample. Default is 1. Returns ------- sample: list List of sampled elements from the partition. lx: int Number of elements on the partition. k: int Number of elements to sample. """ lx = len(population) real_k = k if k <= lx else lx sample_func = rnd.choices if replace else rnd.sample # because otherwise it raises IndexError: sampled = [] if real_k == 0 else sample_func(population=population, k=real_k) return sampled, lx def _sample_reduce(reduce_iter, k, replace): """ Reduce function used on the sample and choice functions. Parameters ---------- reduce_iter : iterable Each element is a tuple coming generated by the _sample_map_partitions function. Returns a sequence of uniformly distributed samples; """ ns_ks = [] s = [] n = 0 # unfolding reduce outputs for i in reduce_iter: (s_i, n_i) = i s.extend(s_i) n += n_i k_i = len(s_i) ns_ks.append((n_i, k_i)) if k < 0 or (k > n and not replace): raise ValueError("Sample larger than population or is negative") # creating the probability array p = [] for n_i, k_i in ns_ks: if k_i > 0: p_i = n_i / (k_i * n) p += [p_i] * k_i sample_func = rnd.choices if replace else _weighted_sampling_without_replacement return sample_func(population=s, weights=p, k=k) def _weighted_sampling_without_replacement(population, weights, k): """ Source: Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. Spirakis """ elt = [(math.log(rnd.random()) / weights[i], i) for i in range(len(weights))] return [population[x[1]] for x in heapq.nlargest(k, elt)]
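A short usage sketch for the sample and choices helpers above, following the docstring examples; the concrete output values depend on the random state, so the ones shown are only illustrative.

import dask.bag as db
from dask.bag import random as bag_random

b = db.from_sequence(range(10), npartitions=2)

# Without replacement: up to k distinct elements drawn across partitions
print(list(bag_random.sample(b, 3).compute()))    # e.g. [1, 4, 8]

# With replacement: elements may repeat
print(list(bag_random.choices(b, k=3).compute())) # e.g. [2, 2, 9]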
get_observations
Generate a `~gammapy.data.Observations`. Parameters ---------- obs_id : list Observation IDs. skip_missing : bool, optional Skip missing observations, default: False Returns ------- observations : `~gammapy.data.Observations` Container holding a list of `~gammapy.data.DataStoreObservation`
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import absolute_import, division, print_function, unicode_literals import logging import subprocess from ..utils.scripts import make_path from ..utils.testing import Checker from .obs_table import ObservationTable from .hdu_index_table import HDUIndexTable from .obs_table import ObservationTableChecker from .observations import DataStoreObservation, Observations, ObservationChecker __all__ = ["DataStore"] log = logging.getLogger(__name__) class DataStore(object): """IACT data store. The data selection and access happens using an observation and an HDU index file as described at :ref:`gadf:iact-storage`. See :gp-extra-notebook:`cta_1dc_introduction` for usage examples. Parameters ---------- hdu_table : `~gammapy.data.HDUIndexTable` HDU index table obs_table : `~gammapy.data.ObservationTable` Observation index table Examples -------- Here's an example how to create a `DataStore` to access H.E.S.S. data: >>> from gammapy.data import DataStore >>> data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1') >>> data_store.info() """ DEFAULT_HDU_TABLE = "hdu-index.fits.gz" """Default HDU table filename.""" DEFAULT_OBS_TABLE = "obs-index.fits.gz" """Default observation table filename.""" def __init__(self, hdu_table=None, obs_table=None): self.hdu_table = hdu_table self.obs_table = obs_table def __str__(self): return self.info(show=False) @classmethod def from_file(cls, filename, hdu_hdu="HDU_INDEX", hdu_obs="OBS_INDEX"): """Create from a FITS file. The FITS file must contain both index files. Parameters ---------- filename : str, Path FITS filename hdu_hdu : str or int FITS HDU name or number for the HDU index table hdu_obs : str or int FITS HDU name or number for the observation index table """ filename = make_path(filename) hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format="fits") obs_table = ObservationTable.read(filename, hdu=hdu_obs, format="fits") return cls(hdu_table=hdu_table, obs_table=obs_table) @classmethod def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None): """Create from a directory. Parameters ---------- base_dir : str, Path Base directory of the data files. hdu_table_filename : str, Path Filename of the HDU index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for. obs_table_filename : str, Path Filename of the observation index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for. 
""" base_dir = make_path(base_dir) if hdu_table_filename: hdu_table_filename = make_path(hdu_table_filename) if (base_dir / hdu_table_filename).exists(): hdu_table_filename = base_dir / hdu_table_filename else: hdu_table_filename = base_dir / cls.DEFAULT_HDU_TABLE if obs_table_filename: obs_table_filename = make_path(obs_table_filename) if (base_dir / obs_table_filename).exists(): obs_table_filename = base_dir / obs_table_filename else: obs_table_filename = base_dir / cls.DEFAULT_OBS_TABLE if not hdu_table_filename.exists(): raise IOError("File not found: {}".format(hdu_table_filename)) log.debug("Reading {}".format(hdu_table_filename)) hdu_table = HDUIndexTable.read(str(hdu_table_filename), format="fits") hdu_table.meta["BASE_DIR"] = str(base_dir) if not obs_table_filename.exists(): raise IOError("File not found: {}".format(obs_table_filename)) log.debug("Reading {}".format(str(obs_table_filename))) obs_table = ObservationTable.read(str(obs_table_filename), format="fits") return cls(hdu_table=hdu_table, obs_table=obs_table) @classmethod def from_config(cls, config): """Create from a config dict.""" base_dir = config["base_dir"] hdu_table_filename = config.get("hduindx", cls.DEFAULT_HDU_TABLE) obs_table_filename = config.get("obsindx", cls.DEFAULT_OBS_TABLE) hdu_table_filename = cls._find_file(hdu_table_filename, base_dir) obs_table_filename = cls._find_file(obs_table_filename, base_dir) return cls.from_files( base_dir=base_dir, hdu_table_filename=hdu_table_filename, obs_table_filename=obs_table_filename, ) @staticmethod def _find_file(filename, dir): """Find a file at an absolute or relative location. - First tries ``Path(filename)`` - Second tries ``Path(dir) / filename`` - Raises ``OSError`` if both don't exist. """ path1 = make_path(filename) path2 = make_path(dir) / filename if path1.is_file(): filename = path1 elif path2.is_file(): filename = path2 else: raise OSError("File not found at {} or {}".format(path1, path2)) return filename def info(self, show=True): """Print some info.""" s = "Data store:\n" s += self.hdu_table.summary() s += "\n\n" s += self.obs_table.summary() if show: print(s) else: return s def obs(self, obs_id): """Access a given `~gammapy.data.DataStoreObservation`. Parameters ---------- obs_id : int Observation ID. Returns ------- observation : `~gammapy.data.DataStoreObservation` Observation container """ return DataStoreObservation(obs_id=int(obs_id), data_store=self) # MASKED: get_observations function (lines 186-213) def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False): """Create a new `~gammapy.data.DataStore` containing a subset of observations. Parameters ---------- obs_id : array-like, `~gammapy.data.ObservationTable` List of observations to copy outdir : str, Path Directory for the new store hdu_class : list of str see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS` verbose : bool Print copied files overwrite : bool Overwrite """ # TODO : Does rsync give any benefits here? 
outdir = make_path(outdir) if isinstance(obs_id, ObservationTable): obs_id = obs_id["OBS_ID"].data hdutable = self.hdu_table hdutable.add_index("OBS_ID") with hdutable.index_mode("discard_on_copy"): subhdutable = hdutable.loc[obs_id] if hdu_class is not None: subhdutable.add_index("HDU_CLASS") with subhdutable.index_mode("discard_on_copy"): subhdutable = subhdutable.loc[hdu_class] subobstable = self.obs_table.select_obs_id(obs_id) for idx in range(len(subhdutable)): # Changes to the file structure could be made here loc = subhdutable.location_info(idx) targetdir = outdir / loc.file_dir targetdir.mkdir(exist_ok=True, parents=True) cmd = ["cp", "-v"] if verbose else ["cp"] if not overwrite: cmd += ["-n"] cmd += [str(loc.path()), str(targetdir)] subprocess.call(cmd) filename = str(outdir / self.DEFAULT_HDU_TABLE) subhdutable.write(filename, format="fits", overwrite=overwrite) filename = str(outdir / self.DEFAULT_OBS_TABLE) subobstable.write(filename, format="fits", overwrite=overwrite) def check(self, checks="all"): """Check index tables and data files. This is a generator that yields a list of dicts. """ checker = DataStoreChecker(self) return checker.run(checks=checks) class DataStoreChecker(Checker): """Check data store. Checks data format and a bit about the content. """ CHECKS = { "obs_table": "check_obs_table", "hdu_table": "check_hdu_table", "observations": "check_observations", "consistency": "check_consistency", } def __init__(self, data_store): self.data_store = data_store def check_obs_table(self): """Checks for the observation index table.""" checker = ObservationTableChecker(self.data_store.obs_table) for record in checker.run(): yield record def check_hdu_table(self): """Checks for the HDU index table.""" t = self.data_store.hdu_table m = t.meta if m.get("HDUCLAS1", "") != "INDEX": yield { "level": "error", "hdu": "hdu-index", "msg": "Invalid header key. Must have HDUCLAS1=INDEX", } if m.get("HDUCLAS2", "") != "HDU": yield { "level": "error", "hdu": "hdu-index", "msg": "Invalid header key. Must have HDUCLAS2=HDU", } # Check that all HDU in the data files exist for idx in range(len(t)): location_info = t.location_info(idx) try: location_info.get_hdu() except KeyError: yield { "level": "error", "msg": "HDU not found: {!r}".format(location_info.__dict__), } def check_consistency(self): """Consistency checks between multiple HDUs""" # obs and HDU index should have the same OBS_ID obs_table_obs_id = set(self.data_store.obs_table["OBS_ID"]) hdu_table_obs_id = set(self.data_store.hdu_table["OBS_ID"]) if not obs_table_obs_id == hdu_table_obs_id: yield { "level": "error", "msg": "Inconsistent OBS_ID in obs and HDU index tables", } # TODO: obs table and events header should have the same times def check_observations(self): """Perform some sanity checks for all observations.""" for obs_id in self.data_store.obs_table["OBS_ID"]: obs = self.data_store.obs(obs_id) for record in ObservationChecker(obs).run(): yield record
def get_observations(self, obs_id, skip_missing=False): """Generate a `~gammapy.data.Observations`. Parameters ---------- obs_id : list Observation IDs. skip_missing : bool, optional Skip missing observations, default: False Returns ------- observations : `~gammapy.data.Observations` Container holding a list of `~gammapy.data.DataStoreObservation` """ obs_list = [] for _ in obs_id: try: obs = self.obs(_) except ValueError as err: if skip_missing: log.warning("Skipping missing obs_id: {!r}".format(_)) continue else: raise err else: obs_list.append(obs) return Observations(obs_list)
186
213
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import absolute_import, division, print_function, unicode_literals import logging import subprocess from ..utils.scripts import make_path from ..utils.testing import Checker from .obs_table import ObservationTable from .hdu_index_table import HDUIndexTable from .obs_table import ObservationTableChecker from .observations import DataStoreObservation, Observations, ObservationChecker __all__ = ["DataStore"] log = logging.getLogger(__name__) class DataStore(object): """IACT data store. The data selection and access happens using an observation and an HDU index file as described at :ref:`gadf:iact-storage`. See :gp-extra-notebook:`cta_1dc_introduction` for usage examples. Parameters ---------- hdu_table : `~gammapy.data.HDUIndexTable` HDU index table obs_table : `~gammapy.data.ObservationTable` Observation index table Examples -------- Here's an example how to create a `DataStore` to access H.E.S.S. data: >>> from gammapy.data import DataStore >>> data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1') >>> data_store.info() """ DEFAULT_HDU_TABLE = "hdu-index.fits.gz" """Default HDU table filename.""" DEFAULT_OBS_TABLE = "obs-index.fits.gz" """Default observation table filename.""" def __init__(self, hdu_table=None, obs_table=None): self.hdu_table = hdu_table self.obs_table = obs_table def __str__(self): return self.info(show=False) @classmethod def from_file(cls, filename, hdu_hdu="HDU_INDEX", hdu_obs="OBS_INDEX"): """Create from a FITS file. The FITS file must contain both index files. Parameters ---------- filename : str, Path FITS filename hdu_hdu : str or int FITS HDU name or number for the HDU index table hdu_obs : str or int FITS HDU name or number for the observation index table """ filename = make_path(filename) hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format="fits") obs_table = ObservationTable.read(filename, hdu=hdu_obs, format="fits") return cls(hdu_table=hdu_table, obs_table=obs_table) @classmethod def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None): """Create from a directory. Parameters ---------- base_dir : str, Path Base directory of the data files. hdu_table_filename : str, Path Filename of the HDU index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for. obs_table_filename : str, Path Filename of the observation index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for. 
""" base_dir = make_path(base_dir) if hdu_table_filename: hdu_table_filename = make_path(hdu_table_filename) if (base_dir / hdu_table_filename).exists(): hdu_table_filename = base_dir / hdu_table_filename else: hdu_table_filename = base_dir / cls.DEFAULT_HDU_TABLE if obs_table_filename: obs_table_filename = make_path(obs_table_filename) if (base_dir / obs_table_filename).exists(): obs_table_filename = base_dir / obs_table_filename else: obs_table_filename = base_dir / cls.DEFAULT_OBS_TABLE if not hdu_table_filename.exists(): raise IOError("File not found: {}".format(hdu_table_filename)) log.debug("Reading {}".format(hdu_table_filename)) hdu_table = HDUIndexTable.read(str(hdu_table_filename), format="fits") hdu_table.meta["BASE_DIR"] = str(base_dir) if not obs_table_filename.exists(): raise IOError("File not found: {}".format(obs_table_filename)) log.debug("Reading {}".format(str(obs_table_filename))) obs_table = ObservationTable.read(str(obs_table_filename), format="fits") return cls(hdu_table=hdu_table, obs_table=obs_table) @classmethod def from_config(cls, config): """Create from a config dict.""" base_dir = config["base_dir"] hdu_table_filename = config.get("hduindx", cls.DEFAULT_HDU_TABLE) obs_table_filename = config.get("obsindx", cls.DEFAULT_OBS_TABLE) hdu_table_filename = cls._find_file(hdu_table_filename, base_dir) obs_table_filename = cls._find_file(obs_table_filename, base_dir) return cls.from_files( base_dir=base_dir, hdu_table_filename=hdu_table_filename, obs_table_filename=obs_table_filename, ) @staticmethod def _find_file(filename, dir): """Find a file at an absolute or relative location. - First tries ``Path(filename)`` - Second tries ``Path(dir) / filename`` - Raises ``OSError`` if both don't exist. """ path1 = make_path(filename) path2 = make_path(dir) / filename if path1.is_file(): filename = path1 elif path2.is_file(): filename = path2 else: raise OSError("File not found at {} or {}".format(path1, path2)) return filename def info(self, show=True): """Print some info.""" s = "Data store:\n" s += self.hdu_table.summary() s += "\n\n" s += self.obs_table.summary() if show: print(s) else: return s def obs(self, obs_id): """Access a given `~gammapy.data.DataStoreObservation`. Parameters ---------- obs_id : int Observation ID. Returns ------- observation : `~gammapy.data.DataStoreObservation` Observation container """ return DataStoreObservation(obs_id=int(obs_id), data_store=self) def get_observations(self, obs_id, skip_missing=False): """Generate a `~gammapy.data.Observations`. Parameters ---------- obs_id : list Observation IDs. skip_missing : bool, optional Skip missing observations, default: False Returns ------- observations : `~gammapy.data.Observations` Container holding a list of `~gammapy.data.DataStoreObservation` """ obs_list = [] for _ in obs_id: try: obs = self.obs(_) except ValueError as err: if skip_missing: log.warning("Skipping missing obs_id: {!r}".format(_)) continue else: raise err else: obs_list.append(obs) return Observations(obs_list) def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False): """Create a new `~gammapy.data.DataStore` containing a subset of observations. Parameters ---------- obs_id : array-like, `~gammapy.data.ObservationTable` List of observations to copy outdir : str, Path Directory for the new store hdu_class : list of str see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS` verbose : bool Print copied files overwrite : bool Overwrite """ # TODO : Does rsync give any benefits here? 
outdir = make_path(outdir) if isinstance(obs_id, ObservationTable): obs_id = obs_id["OBS_ID"].data hdutable = self.hdu_table hdutable.add_index("OBS_ID") with hdutable.index_mode("discard_on_copy"): subhdutable = hdutable.loc[obs_id] if hdu_class is not None: subhdutable.add_index("HDU_CLASS") with subhdutable.index_mode("discard_on_copy"): subhdutable = subhdutable.loc[hdu_class] subobstable = self.obs_table.select_obs_id(obs_id) for idx in range(len(subhdutable)): # Changes to the file structure could be made here loc = subhdutable.location_info(idx) targetdir = outdir / loc.file_dir targetdir.mkdir(exist_ok=True, parents=True) cmd = ["cp", "-v"] if verbose else ["cp"] if not overwrite: cmd += ["-n"] cmd += [str(loc.path()), str(targetdir)] subprocess.call(cmd) filename = str(outdir / self.DEFAULT_HDU_TABLE) subhdutable.write(filename, format="fits", overwrite=overwrite) filename = str(outdir / self.DEFAULT_OBS_TABLE) subobstable.write(filename, format="fits", overwrite=overwrite) def check(self, checks="all"): """Check index tables and data files. This is a generator that yields a list of dicts. """ checker = DataStoreChecker(self) return checker.run(checks=checks) class DataStoreChecker(Checker): """Check data store. Checks data format and a bit about the content. """ CHECKS = { "obs_table": "check_obs_table", "hdu_table": "check_hdu_table", "observations": "check_observations", "consistency": "check_consistency", } def __init__(self, data_store): self.data_store = data_store def check_obs_table(self): """Checks for the observation index table.""" checker = ObservationTableChecker(self.data_store.obs_table) for record in checker.run(): yield record def check_hdu_table(self): """Checks for the HDU index table.""" t = self.data_store.hdu_table m = t.meta if m.get("HDUCLAS1", "") != "INDEX": yield { "level": "error", "hdu": "hdu-index", "msg": "Invalid header key. Must have HDUCLAS1=INDEX", } if m.get("HDUCLAS2", "") != "HDU": yield { "level": "error", "hdu": "hdu-index", "msg": "Invalid header key. Must have HDUCLAS2=HDU", } # Check that all HDU in the data files exist for idx in range(len(t)): location_info = t.location_info(idx) try: location_info.get_hdu() except KeyError: yield { "level": "error", "msg": "HDU not found: {!r}".format(location_info.__dict__), } def check_consistency(self): """Consistency checks between multiple HDUs""" # obs and HDU index should have the same OBS_ID obs_table_obs_id = set(self.data_store.obs_table["OBS_ID"]) hdu_table_obs_id = set(self.data_store.hdu_table["OBS_ID"]) if not obs_table_obs_id == hdu_table_obs_id: yield { "level": "error", "msg": "Inconsistent OBS_ID in obs and HDU index tables", } # TODO: obs table and events header should have the same times def check_observations(self): """Perform some sanity checks for all observations.""" for obs_id in self.data_store.obs_table["OBS_ID"]: obs = self.data_store.obs(obs_id) for record in ObservationChecker(obs).run(): yield record
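A brief usage sketch for DataStore.get_observations, reusing the $GAMMAPY_DATA/hess-dl3-dr1 store from the class docstring; the observation IDs are illustrative, and skip_missing=True simply logs and skips any that are absent.

from gammapy.data import DataStore

data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1')
observations = data_store.get_observations([23523, 23526], skip_missing=True)
print(len(observations))  # number of observations actually found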
copy_obs
Create a new `~gammapy.data.DataStore` containing a subset of observations. Parameters ---------- obs_id : array-like, `~gammapy.data.ObservationTable` List of observations to copy outdir : str, Path Directory for the new store hdu_class : list of str see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS` verbose : bool Print copied files overwrite : bool Overwrite
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import absolute_import, division, print_function, unicode_literals import logging import subprocess from ..utils.scripts import make_path from ..utils.testing import Checker from .obs_table import ObservationTable from .hdu_index_table import HDUIndexTable from .obs_table import ObservationTableChecker from .observations import DataStoreObservation, Observations, ObservationChecker __all__ = ["DataStore"] log = logging.getLogger(__name__) class DataStore(object): """IACT data store. The data selection and access happens using an observation and an HDU index file as described at :ref:`gadf:iact-storage`. See :gp-extra-notebook:`cta_1dc_introduction` for usage examples. Parameters ---------- hdu_table : `~gammapy.data.HDUIndexTable` HDU index table obs_table : `~gammapy.data.ObservationTable` Observation index table Examples -------- Here's an example how to create a `DataStore` to access H.E.S.S. data: >>> from gammapy.data import DataStore >>> data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1') >>> data_store.info() """ DEFAULT_HDU_TABLE = "hdu-index.fits.gz" """Default HDU table filename.""" DEFAULT_OBS_TABLE = "obs-index.fits.gz" """Default observation table filename.""" def __init__(self, hdu_table=None, obs_table=None): self.hdu_table = hdu_table self.obs_table = obs_table def __str__(self): return self.info(show=False) @classmethod def from_file(cls, filename, hdu_hdu="HDU_INDEX", hdu_obs="OBS_INDEX"): """Create from a FITS file. The FITS file must contain both index files. Parameters ---------- filename : str, Path FITS filename hdu_hdu : str or int FITS HDU name or number for the HDU index table hdu_obs : str or int FITS HDU name or number for the observation index table """ filename = make_path(filename) hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format="fits") obs_table = ObservationTable.read(filename, hdu=hdu_obs, format="fits") return cls(hdu_table=hdu_table, obs_table=obs_table) @classmethod def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None): """Create from a directory. Parameters ---------- base_dir : str, Path Base directory of the data files. hdu_table_filename : str, Path Filename of the HDU index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for. obs_table_filename : str, Path Filename of the observation index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for. 
""" base_dir = make_path(base_dir) if hdu_table_filename: hdu_table_filename = make_path(hdu_table_filename) if (base_dir / hdu_table_filename).exists(): hdu_table_filename = base_dir / hdu_table_filename else: hdu_table_filename = base_dir / cls.DEFAULT_HDU_TABLE if obs_table_filename: obs_table_filename = make_path(obs_table_filename) if (base_dir / obs_table_filename).exists(): obs_table_filename = base_dir / obs_table_filename else: obs_table_filename = base_dir / cls.DEFAULT_OBS_TABLE if not hdu_table_filename.exists(): raise IOError("File not found: {}".format(hdu_table_filename)) log.debug("Reading {}".format(hdu_table_filename)) hdu_table = HDUIndexTable.read(str(hdu_table_filename), format="fits") hdu_table.meta["BASE_DIR"] = str(base_dir) if not obs_table_filename.exists(): raise IOError("File not found: {}".format(obs_table_filename)) log.debug("Reading {}".format(str(obs_table_filename))) obs_table = ObservationTable.read(str(obs_table_filename), format="fits") return cls(hdu_table=hdu_table, obs_table=obs_table) @classmethod def from_config(cls, config): """Create from a config dict.""" base_dir = config["base_dir"] hdu_table_filename = config.get("hduindx", cls.DEFAULT_HDU_TABLE) obs_table_filename = config.get("obsindx", cls.DEFAULT_OBS_TABLE) hdu_table_filename = cls._find_file(hdu_table_filename, base_dir) obs_table_filename = cls._find_file(obs_table_filename, base_dir) return cls.from_files( base_dir=base_dir, hdu_table_filename=hdu_table_filename, obs_table_filename=obs_table_filename, ) @staticmethod def _find_file(filename, dir): """Find a file at an absolute or relative location. - First tries ``Path(filename)`` - Second tries ``Path(dir) / filename`` - Raises ``OSError`` if both don't exist. """ path1 = make_path(filename) path2 = make_path(dir) / filename if path1.is_file(): filename = path1 elif path2.is_file(): filename = path2 else: raise OSError("File not found at {} or {}".format(path1, path2)) return filename def info(self, show=True): """Print some info.""" s = "Data store:\n" s += self.hdu_table.summary() s += "\n\n" s += self.obs_table.summary() if show: print(s) else: return s def obs(self, obs_id): """Access a given `~gammapy.data.DataStoreObservation`. Parameters ---------- obs_id : int Observation ID. Returns ------- observation : `~gammapy.data.DataStoreObservation` Observation container """ return DataStoreObservation(obs_id=int(obs_id), data_store=self) def get_observations(self, obs_id, skip_missing=False): """Generate a `~gammapy.data.Observations`. Parameters ---------- obs_id : list Observation IDs. skip_missing : bool, optional Skip missing observations, default: False Returns ------- observations : `~gammapy.data.Observations` Container holding a list of `~gammapy.data.DataStoreObservation` """ obs_list = [] for _ in obs_id: try: obs = self.obs(_) except ValueError as err: if skip_missing: log.warning("Skipping missing obs_id: {!r}".format(_)) continue else: raise err else: obs_list.append(obs) return Observations(obs_list) # MASKED: copy_obs function (lines 215-262) def check(self, checks="all"): """Check index tables and data files. This is a generator that yields a list of dicts. """ checker = DataStoreChecker(self) return checker.run(checks=checks) class DataStoreChecker(Checker): """Check data store. Checks data format and a bit about the content. 
""" CHECKS = { "obs_table": "check_obs_table", "hdu_table": "check_hdu_table", "observations": "check_observations", "consistency": "check_consistency", } def __init__(self, data_store): self.data_store = data_store def check_obs_table(self): """Checks for the observation index table.""" checker = ObservationTableChecker(self.data_store.obs_table) for record in checker.run(): yield record def check_hdu_table(self): """Checks for the HDU index table.""" t = self.data_store.hdu_table m = t.meta if m.get("HDUCLAS1", "") != "INDEX": yield { "level": "error", "hdu": "hdu-index", "msg": "Invalid header key. Must have HDUCLAS1=INDEX", } if m.get("HDUCLAS2", "") != "HDU": yield { "level": "error", "hdu": "hdu-index", "msg": "Invalid header key. Must have HDUCLAS2=HDU", } # Check that all HDU in the data files exist for idx in range(len(t)): location_info = t.location_info(idx) try: location_info.get_hdu() except KeyError: yield { "level": "error", "msg": "HDU not found: {!r}".format(location_info.__dict__), } def check_consistency(self): """Consistency checks between multiple HDUs""" # obs and HDU index should have the same OBS_ID obs_table_obs_id = set(self.data_store.obs_table["OBS_ID"]) hdu_table_obs_id = set(self.data_store.hdu_table["OBS_ID"]) if not obs_table_obs_id == hdu_table_obs_id: yield { "level": "error", "msg": "Inconsistent OBS_ID in obs and HDU index tables", } # TODO: obs table and events header should have the same times def check_observations(self): """Perform some sanity checks for all observations.""" for obs_id in self.data_store.obs_table["OBS_ID"]: obs = self.data_store.obs(obs_id) for record in ObservationChecker(obs).run(): yield record
def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False): """Create a new `~gammapy.data.DataStore` containing a subset of observations. Parameters ---------- obs_id : array-like, `~gammapy.data.ObservationTable` List of observations to copy outdir : str, Path Directory for the new store hdu_class : list of str see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS` verbose : bool Print copied files overwrite : bool Overwrite """ # TODO : Does rsync give any benefits here? outdir = make_path(outdir) if isinstance(obs_id, ObservationTable): obs_id = obs_id["OBS_ID"].data hdutable = self.hdu_table hdutable.add_index("OBS_ID") with hdutable.index_mode("discard_on_copy"): subhdutable = hdutable.loc[obs_id] if hdu_class is not None: subhdutable.add_index("HDU_CLASS") with subhdutable.index_mode("discard_on_copy"): subhdutable = subhdutable.loc[hdu_class] subobstable = self.obs_table.select_obs_id(obs_id) for idx in range(len(subhdutable)): # Changes to the file structure could be made here loc = subhdutable.location_info(idx) targetdir = outdir / loc.file_dir targetdir.mkdir(exist_ok=True, parents=True) cmd = ["cp", "-v"] if verbose else ["cp"] if not overwrite: cmd += ["-n"] cmd += [str(loc.path()), str(targetdir)] subprocess.call(cmd) filename = str(outdir / self.DEFAULT_HDU_TABLE) subhdutable.write(filename, format="fits", overwrite=overwrite) filename = str(outdir / self.DEFAULT_OBS_TABLE) subobstable.write(filename, format="fits", overwrite=overwrite)
215
262
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import absolute_import, division, print_function, unicode_literals import logging import subprocess from ..utils.scripts import make_path from ..utils.testing import Checker from .obs_table import ObservationTable from .hdu_index_table import HDUIndexTable from .obs_table import ObservationTableChecker from .observations import DataStoreObservation, Observations, ObservationChecker __all__ = ["DataStore"] log = logging.getLogger(__name__) class DataStore(object): """IACT data store. The data selection and access happens using an observation and an HDU index file as described at :ref:`gadf:iact-storage`. See :gp-extra-notebook:`cta_1dc_introduction` for usage examples. Parameters ---------- hdu_table : `~gammapy.data.HDUIndexTable` HDU index table obs_table : `~gammapy.data.ObservationTable` Observation index table Examples -------- Here's an example how to create a `DataStore` to access H.E.S.S. data: >>> from gammapy.data import DataStore >>> data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1') >>> data_store.info() """ DEFAULT_HDU_TABLE = "hdu-index.fits.gz" """Default HDU table filename.""" DEFAULT_OBS_TABLE = "obs-index.fits.gz" """Default observation table filename.""" def __init__(self, hdu_table=None, obs_table=None): self.hdu_table = hdu_table self.obs_table = obs_table def __str__(self): return self.info(show=False) @classmethod def from_file(cls, filename, hdu_hdu="HDU_INDEX", hdu_obs="OBS_INDEX"): """Create from a FITS file. The FITS file must contain both index files. Parameters ---------- filename : str, Path FITS filename hdu_hdu : str or int FITS HDU name or number for the HDU index table hdu_obs : str or int FITS HDU name or number for the observation index table """ filename = make_path(filename) hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format="fits") obs_table = ObservationTable.read(filename, hdu=hdu_obs, format="fits") return cls(hdu_table=hdu_table, obs_table=obs_table) @classmethod def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None): """Create from a directory. Parameters ---------- base_dir : str, Path Base directory of the data files. hdu_table_filename : str, Path Filename of the HDU index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for. obs_table_filename : str, Path Filename of the observation index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for. 
""" base_dir = make_path(base_dir) if hdu_table_filename: hdu_table_filename = make_path(hdu_table_filename) if (base_dir / hdu_table_filename).exists(): hdu_table_filename = base_dir / hdu_table_filename else: hdu_table_filename = base_dir / cls.DEFAULT_HDU_TABLE if obs_table_filename: obs_table_filename = make_path(obs_table_filename) if (base_dir / obs_table_filename).exists(): obs_table_filename = base_dir / obs_table_filename else: obs_table_filename = base_dir / cls.DEFAULT_OBS_TABLE if not hdu_table_filename.exists(): raise IOError("File not found: {}".format(hdu_table_filename)) log.debug("Reading {}".format(hdu_table_filename)) hdu_table = HDUIndexTable.read(str(hdu_table_filename), format="fits") hdu_table.meta["BASE_DIR"] = str(base_dir) if not obs_table_filename.exists(): raise IOError("File not found: {}".format(obs_table_filename)) log.debug("Reading {}".format(str(obs_table_filename))) obs_table = ObservationTable.read(str(obs_table_filename), format="fits") return cls(hdu_table=hdu_table, obs_table=obs_table) @classmethod def from_config(cls, config): """Create from a config dict.""" base_dir = config["base_dir"] hdu_table_filename = config.get("hduindx", cls.DEFAULT_HDU_TABLE) obs_table_filename = config.get("obsindx", cls.DEFAULT_OBS_TABLE) hdu_table_filename = cls._find_file(hdu_table_filename, base_dir) obs_table_filename = cls._find_file(obs_table_filename, base_dir) return cls.from_files( base_dir=base_dir, hdu_table_filename=hdu_table_filename, obs_table_filename=obs_table_filename, ) @staticmethod def _find_file(filename, dir): """Find a file at an absolute or relative location. - First tries ``Path(filename)`` - Second tries ``Path(dir) / filename`` - Raises ``OSError`` if both don't exist. """ path1 = make_path(filename) path2 = make_path(dir) / filename if path1.is_file(): filename = path1 elif path2.is_file(): filename = path2 else: raise OSError("File not found at {} or {}".format(path1, path2)) return filename def info(self, show=True): """Print some info.""" s = "Data store:\n" s += self.hdu_table.summary() s += "\n\n" s += self.obs_table.summary() if show: print(s) else: return s def obs(self, obs_id): """Access a given `~gammapy.data.DataStoreObservation`. Parameters ---------- obs_id : int Observation ID. Returns ------- observation : `~gammapy.data.DataStoreObservation` Observation container """ return DataStoreObservation(obs_id=int(obs_id), data_store=self) def get_observations(self, obs_id, skip_missing=False): """Generate a `~gammapy.data.Observations`. Parameters ---------- obs_id : list Observation IDs. skip_missing : bool, optional Skip missing observations, default: False Returns ------- observations : `~gammapy.data.Observations` Container holding a list of `~gammapy.data.DataStoreObservation` """ obs_list = [] for _ in obs_id: try: obs = self.obs(_) except ValueError as err: if skip_missing: log.warning("Skipping missing obs_id: {!r}".format(_)) continue else: raise err else: obs_list.append(obs) return Observations(obs_list) def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False): """Create a new `~gammapy.data.DataStore` containing a subset of observations. Parameters ---------- obs_id : array-like, `~gammapy.data.ObservationTable` List of observations to copy outdir : str, Path Directory for the new store hdu_class : list of str see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS` verbose : bool Print copied files overwrite : bool Overwrite """ # TODO : Does rsync give any benefits here? 
outdir = make_path(outdir) if isinstance(obs_id, ObservationTable): obs_id = obs_id["OBS_ID"].data hdutable = self.hdu_table hdutable.add_index("OBS_ID") with hdutable.index_mode("discard_on_copy"): subhdutable = hdutable.loc[obs_id] if hdu_class is not None: subhdutable.add_index("HDU_CLASS") with subhdutable.index_mode("discard_on_copy"): subhdutable = subhdutable.loc[hdu_class] subobstable = self.obs_table.select_obs_id(obs_id) for idx in range(len(subhdutable)): # Changes to the file structure could be made here loc = subhdutable.location_info(idx) targetdir = outdir / loc.file_dir targetdir.mkdir(exist_ok=True, parents=True) cmd = ["cp", "-v"] if verbose else ["cp"] if not overwrite: cmd += ["-n"] cmd += [str(loc.path()), str(targetdir)] subprocess.call(cmd) filename = str(outdir / self.DEFAULT_HDU_TABLE) subhdutable.write(filename, format="fits", overwrite=overwrite) filename = str(outdir / self.DEFAULT_OBS_TABLE) subobstable.write(filename, format="fits", overwrite=overwrite) def check(self, checks="all"): """Check index tables and data files. This is a generator that yields a list of dicts. """ checker = DataStoreChecker(self) return checker.run(checks=checks) class DataStoreChecker(Checker): """Check data store. Checks data format and a bit about the content. """ CHECKS = { "obs_table": "check_obs_table", "hdu_table": "check_hdu_table", "observations": "check_observations", "consistency": "check_consistency", } def __init__(self, data_store): self.data_store = data_store def check_obs_table(self): """Checks for the observation index table.""" checker = ObservationTableChecker(self.data_store.obs_table) for record in checker.run(): yield record def check_hdu_table(self): """Checks for the HDU index table.""" t = self.data_store.hdu_table m = t.meta if m.get("HDUCLAS1", "") != "INDEX": yield { "level": "error", "hdu": "hdu-index", "msg": "Invalid header key. Must have HDUCLAS1=INDEX", } if m.get("HDUCLAS2", "") != "HDU": yield { "level": "error", "hdu": "hdu-index", "msg": "Invalid header key. Must have HDUCLAS2=HDU", } # Check that all HDU in the data files exist for idx in range(len(t)): location_info = t.location_info(idx) try: location_info.get_hdu() except KeyError: yield { "level": "error", "msg": "HDU not found: {!r}".format(location_info.__dict__), } def check_consistency(self): """Consistency checks between multiple HDUs""" # obs and HDU index should have the same OBS_ID obs_table_obs_id = set(self.data_store.obs_table["OBS_ID"]) hdu_table_obs_id = set(self.data_store.hdu_table["OBS_ID"]) if not obs_table_obs_id == hdu_table_obs_id: yield { "level": "error", "msg": "Inconsistent OBS_ID in obs and HDU index tables", } # TODO: obs table and events header should have the same times def check_observations(self): """Perform some sanity checks for all observations.""" for obs_id in self.data_store.obs_table["OBS_ID"]: obs = self.data_store.obs(obs_id) for record in ObservationChecker(obs).run(): yield record
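A minimal usage sketch for the `DataStore.copy_obs` method shown in this row (illustrative, not part of the dataset row): it assumes the `$GAMMAPY_DATA/hess-dl3-dr1` files referenced in the class docstring are available locally, and the observation IDs and output path are placeholders.

# Sketch: copy two observations (keeping only their EVENTS HDUs) into a new
# data store, then open the subset. Obs IDs and paths are illustrative.
from gammapy.data import DataStore

data_store = DataStore.from_dir("$GAMMAPY_DATA/hess-dl3-dr1")
data_store.copy_obs(
    obs_id=[23523, 23526],   # subset of observations to copy
    outdir="./hess-subset",  # target directory for the new store
    hdu_class=["events"],    # restrict to EVENTS HDUs (see VALID_HDU_CLASS)
    verbose=True,
)

# The output directory is itself a valid data store with fresh index tables.
subset = DataStore.from_dir("./hess-subset")
subset.info()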
get
Get an existing CSIStorageCapacity resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
# coding=utf-8 # *** WARNING: this file was generated by pulumigen. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ... import meta as _meta __all__ = ['CSIStorageCapacityArgs', 'CSIStorageCapacity'] @pulumi.input_type class CSIStorageCapacityArgs: def __init__(__self__, *, storage_class_name: pulumi.Input[str], api_version: Optional[pulumi.Input[str]] = None, capacity: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, maximum_volume_size: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None, node_topology: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None): """ The set of arguments for constructing a CSIStorageCapacity resource. :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input['_meta.v1.LabelSelectorArgs'] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. 
If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. """ pulumi.set(__self__, "storage_class_name", storage_class_name) if api_version is not None: pulumi.set(__self__, "api_version", 'storage.k8s.io/v1beta1') if capacity is not None: pulumi.set(__self__, "capacity", capacity) if kind is not None: pulumi.set(__self__, "kind", 'CSIStorageCapacity') if maximum_volume_size is not None: pulumi.set(__self__, "maximum_volume_size", maximum_volume_size) if metadata is not None: pulumi.set(__self__, "metadata", metadata) if node_topology is not None: pulumi.set(__self__, "node_topology", node_topology) @property @pulumi.getter(name="storageClassName") def storage_class_name(self) -> pulumi.Input[str]: """ The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. """ return pulumi.get(self, "storage_class_name") @storage_class_name.setter def storage_class_name(self, value: pulumi.Input[str]): pulumi.set(self, "storage_class_name", value) @property @pulumi.getter(name="apiVersion") def api_version(self) -> Optional[pulumi.Input[str]]: """ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources """ return pulumi.get(self, "api_version") @api_version.setter def api_version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "api_version", value) @property @pulumi.getter def capacity(self) -> Optional[pulumi.Input[str]]: """ Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. """ return pulumi.get(self, "capacity") @capacity.setter def capacity(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "capacity", value) @property @pulumi.getter def kind(self) -> Optional[pulumi.Input[str]]: """ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds """ return pulumi.get(self, "kind") @kind.setter def kind(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kind", value) @property @pulumi.getter(name="maximumVolumeSize") def maximum_volume_size(self) -> Optional[pulumi.Input[str]]: """ MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. 
The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. """ return pulumi.get(self, "maximum_volume_size") @maximum_volume_size.setter def maximum_volume_size(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "maximum_volume_size", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]: """ Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]): pulumi.set(self, "metadata", value) @property @pulumi.getter(name="nodeTopology") def node_topology(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]: """ NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. """ return pulumi.get(self, "node_topology") @node_topology.setter def node_topology(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]): pulumi.set(self, "node_topology", value) class CSIStorageCapacity(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, api_version: Optional[pulumi.Input[str]] = None, capacity: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, maximum_volume_size: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None, node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]] = None, storage_class_name: Optional[pulumi.Input[str]] = None, __props__=None): """ CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes. For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123" The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero The producer of these objects can decide which approach is more suitable. They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. :param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. """ ... @overload def __init__(__self__, resource_name: str, args: CSIStorageCapacityArgs, opts: Optional[pulumi.ResourceOptions] = None): """ CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes. For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123" The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero The producer of these objects can decide which approach is more suitable. 
They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity. :param str resource_name: The name of the resource. :param CSIStorageCapacityArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(CSIStorageCapacityArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, api_version: Optional[pulumi.Input[str]] = None, capacity: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, maximum_volume_size: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None, node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]] = None, storage_class_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs) __props__.__dict__["api_version"] = 'storage.k8s.io/v1beta1' __props__.__dict__["capacity"] = capacity __props__.__dict__["kind"] = 'CSIStorageCapacity' __props__.__dict__["maximum_volume_size"] = maximum_volume_size __props__.__dict__["metadata"] = metadata __props__.__dict__["node_topology"] = node_topology if storage_class_name is None and not opts.urn: raise TypeError("Missing required property 'storage_class_name'") __props__.__dict__["storage_class_name"] = storage_class_name alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:storage.k8s.io/v1alpha1:CSIStorageCapacity")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(CSIStorageCapacity, __self__).__init__( 'kubernetes:storage.k8s.io/v1beta1:CSIStorageCapacity', resource_name, __props__, opts) # MASKED: get function (lines 259-282) @property @pulumi.getter(name="apiVersion") def api_version(self) -> pulumi.Output[Optional[str]]: """ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources """ return pulumi.get(self, "api_version") @property @pulumi.getter def capacity(self) -> pulumi.Output[Optional[str]]: """ Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. 
""" return pulumi.get(self, "capacity") @property @pulumi.getter def kind(self) -> pulumi.Output[Optional[str]]: """ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds """ return pulumi.get(self, "kind") @property @pulumi.getter(name="maximumVolumeSize") def maximum_volume_size(self) -> pulumi.Output[Optional[str]]: """ MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. """ return pulumi.get(self, "maximum_volume_size") @property @pulumi.getter def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]: """ Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata """ return pulumi.get(self, "metadata") @property @pulumi.getter(name="nodeTopology") def node_topology(self) -> pulumi.Output[Optional['_meta.v1.outputs.LabelSelector']]: """ NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. """ return pulumi.get(self, "node_topology") @property @pulumi.getter(name="storageClassName") def storage_class_name(self) -> pulumi.Output[str]: """ The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. """ return pulumi.get(self, "storage_class_name")
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'CSIStorageCapacity': """ Get an existing CSIStorageCapacity resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs) __props__.__dict__["api_version"] = None __props__.__dict__["capacity"] = None __props__.__dict__["kind"] = None __props__.__dict__["maximum_volume_size"] = None __props__.__dict__["metadata"] = None __props__.__dict__["node_topology"] = None __props__.__dict__["storage_class_name"] = None return CSIStorageCapacity(resource_name, opts=opts, __props__=__props__)
259
282
# coding=utf-8 # *** WARNING: this file was generated by pulumigen. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ... import meta as _meta __all__ = ['CSIStorageCapacityArgs', 'CSIStorageCapacity'] @pulumi.input_type class CSIStorageCapacityArgs: def __init__(__self__, *, storage_class_name: pulumi.Input[str], api_version: Optional[pulumi.Input[str]] = None, capacity: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, maximum_volume_size: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None, node_topology: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None): """ The set of arguments for constructing a CSIStorageCapacity resource. :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input['_meta.v1.LabelSelectorArgs'] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. 
If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. """ pulumi.set(__self__, "storage_class_name", storage_class_name) if api_version is not None: pulumi.set(__self__, "api_version", 'storage.k8s.io/v1beta1') if capacity is not None: pulumi.set(__self__, "capacity", capacity) if kind is not None: pulumi.set(__self__, "kind", 'CSIStorageCapacity') if maximum_volume_size is not None: pulumi.set(__self__, "maximum_volume_size", maximum_volume_size) if metadata is not None: pulumi.set(__self__, "metadata", metadata) if node_topology is not None: pulumi.set(__self__, "node_topology", node_topology) @property @pulumi.getter(name="storageClassName") def storage_class_name(self) -> pulumi.Input[str]: """ The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. """ return pulumi.get(self, "storage_class_name") @storage_class_name.setter def storage_class_name(self, value: pulumi.Input[str]): pulumi.set(self, "storage_class_name", value) @property @pulumi.getter(name="apiVersion") def api_version(self) -> Optional[pulumi.Input[str]]: """ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources """ return pulumi.get(self, "api_version") @api_version.setter def api_version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "api_version", value) @property @pulumi.getter def capacity(self) -> Optional[pulumi.Input[str]]: """ Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. """ return pulumi.get(self, "capacity") @capacity.setter def capacity(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "capacity", value) @property @pulumi.getter def kind(self) -> Optional[pulumi.Input[str]]: """ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds """ return pulumi.get(self, "kind") @kind.setter def kind(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kind", value) @property @pulumi.getter(name="maximumVolumeSize") def maximum_volume_size(self) -> Optional[pulumi.Input[str]]: """ MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. 
The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. """ return pulumi.get(self, "maximum_volume_size") @maximum_volume_size.setter def maximum_volume_size(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "maximum_volume_size", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]: """ Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]): pulumi.set(self, "metadata", value) @property @pulumi.getter(name="nodeTopology") def node_topology(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]: """ NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. """ return pulumi.get(self, "node_topology") @node_topology.setter def node_topology(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]): pulumi.set(self, "node_topology", value) class CSIStorageCapacity(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, api_version: Optional[pulumi.Input[str]] = None, capacity: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, maximum_volume_size: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None, node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]] = None, storage_class_name: Optional[pulumi.Input[str]] = None, __props__=None): """ CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes. For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123" The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero The producer of these objects can decide which approach is more suitable. They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. :param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. """ ... @overload def __init__(__self__, resource_name: str, args: CSIStorageCapacityArgs, opts: Optional[pulumi.ResourceOptions] = None): """ CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes. For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123" The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero The producer of these objects can decide which approach is more suitable. 
They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity. :param str resource_name: The name of the resource. :param CSIStorageCapacityArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(CSIStorageCapacityArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, api_version: Optional[pulumi.Input[str]] = None, capacity: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, maximum_volume_size: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None, node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]] = None, storage_class_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs) __props__.__dict__["api_version"] = 'storage.k8s.io/v1beta1' __props__.__dict__["capacity"] = capacity __props__.__dict__["kind"] = 'CSIStorageCapacity' __props__.__dict__["maximum_volume_size"] = maximum_volume_size __props__.__dict__["metadata"] = metadata __props__.__dict__["node_topology"] = node_topology if storage_class_name is None and not opts.urn: raise TypeError("Missing required property 'storage_class_name'") __props__.__dict__["storage_class_name"] = storage_class_name alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:storage.k8s.io/v1alpha1:CSIStorageCapacity")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(CSIStorageCapacity, __self__).__init__( 'kubernetes:storage.k8s.io/v1beta1:CSIStorageCapacity', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'CSIStorageCapacity': """ Get an existing CSIStorageCapacity resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs) __props__.__dict__["api_version"] = None __props__.__dict__["capacity"] = None __props__.__dict__["kind"] = None __props__.__dict__["maximum_volume_size"] = None __props__.__dict__["metadata"] = None __props__.__dict__["node_topology"] = None __props__.__dict__["storage_class_name"] = None return CSIStorageCapacity(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="apiVersion") def api_version(self) -> pulumi.Output[Optional[str]]: """ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources """ return pulumi.get(self, "api_version") @property @pulumi.getter def capacity(self) -> pulumi.Output[Optional[str]]: """ Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. """ return pulumi.get(self, "capacity") @property @pulumi.getter def kind(self) -> pulumi.Output[Optional[str]]: """ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds """ return pulumi.get(self, "kind") @property @pulumi.getter(name="maximumVolumeSize") def maximum_volume_size(self) -> pulumi.Output[Optional[str]]: """ MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. """ return pulumi.get(self, "maximum_volume_size") @property @pulumi.getter def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]: """ Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata """ return pulumi.get(self, "metadata") @property @pulumi.getter(name="nodeTopology") def node_topology(self) -> pulumi.Output[Optional['_meta.v1.outputs.LabelSelector']]: """ NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. 
""" return pulumi.get(self, "node_topology") @property @pulumi.getter(name="storageClassName") def storage_class_name(self) -> pulumi.Output[str]: """ The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. """ return pulumi.get(self, "storage_class_name")
commits
Get the commits of a branch
        @param branch:
        @param kwargs:
        @return:
# -*- coding: utf-8 -*-

"""
    walle-web

    :copyright: © 2015-2019 walle-web.io
    :created time: 2019-02-24 10:47:53
    :author: wushuiyong@walle-web.io
"""

import os
import re
import os.path as osp
import git as PyGit
from git import Repo as PyRepo


class Repo:
    path = None

    def __init__(self, path=None):
        self.path = path

    def is_git_dir(self):
        '''
        Check whether the path is a git directory
        @param path:
        @return:
        '''
        d = self.path + '/.git'
        if osp.isdir(d):
            if osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs')):
                headref = osp.join(d, 'HEAD')
                return osp.isfile(headref) or \
                       (osp.islink(headref) and
                        os.readlink(headref).startswith('refs'))
            elif (osp.isfile(osp.join(d, 'gitdir')) and
                  osp.isfile(osp.join(d, 'commondir')) and
                  osp.isfile(osp.join(d, 'gitfile'))):
                return False
        return False

    def init(self, url):
        # Create the directory
        if not os.path.exists(self.path):
            os.makedirs(self.path)

        # git clone
        if self.is_git_dir():
            return self.pull()
        else:
            return self.clone(url)

    def clone(self, url):
        '''
        Check out the project
        @param branch:
        @param kwargs:
        @return:
        '''
        return PyRepo.clone_from(url, self.path)

    def pull(self):
        '''
        Update the project
        @param branch:
        @param kwargs:
        @return:
        '''
        repo = PyRepo(self.path)
        return repo.remote().pull()

    def checkout_2_branch(self, branch):
        PyRepo(self.path).git.checkout(branch)

    def checkout_2_commit(self, branch, commit):
        '''
        @todo not finished yet
        @param branch:
        @param commit:
        @return:
        '''
        PyRepo(self.path).git.checkout(branch)
        # PyRepo(self.path).head.set_reference(branch)
        # This call is problematic: it only resets, without checking out
        PyRepo(self.path).head.set_commit(commit)

    def checkout_2_tag(self, tag):
        PyRepo(self.path).git.checkout(tag)

    def branches(self):
        '''
        Get all branches
        @param branch:
        @param kwargs:
        @return:
        '''
        # Drop 'origin/HEAD ->', the pointer to the current remote HEAD
        # Strip the remote-name prefix
        branches = PyRepo(self.path).remote().refs
        # fixbug https://github.com/meolu/walle-web/issues/705
        return [str(branch).strip().lstrip('origin').lstrip('/') for branch in branches if
                not str(branch).strip().startswith('origin/HEAD')]

    def tags(self):
        '''
        Get all tags
        @param branch:
        @param kwargs:
        @return:
        '''
        return [str(tag) for tag in PyRepo(self.path).tags]

    # MASKED: commits function (lines 118-144)
    def commits(self, branch):
        '''
        Get the commits of a branch
        @param branch:
        @param kwargs:
        @return:
        '''
        self.checkout_2_branch(branch)

        commit_log = PyGit.Git(self.path).log('--pretty=%h #@_@# %an #@_@# %s', max_count=50)
        commit_list = commit_log.split('\n')
        commits = []
        for commit in commit_list:
            if not re.search('^.+ #@_@# .+ #@_@# .*$', commit):
                continue

            commit_dict = commit.split(' #@_@# ')
            from flask import current_app
            current_app.logger.info(commit_dict)
            commits.append({
                'id': commit_dict[0],
                'name': commit_dict[1],
                'message': commit_dict[2],
            })

        return commits
118
144
# -*- coding: utf-8 -*-

"""
    walle-web

    :copyright: © 2015-2019 walle-web.io
    :created time: 2019-02-24 10:47:53
    :author: wushuiyong@walle-web.io
"""

import os
import re
import os.path as osp
import git as PyGit
from git import Repo as PyRepo


class Repo:
    path = None

    def __init__(self, path=None):
        self.path = path

    def is_git_dir(self):
        '''
        Check whether the path is a git directory
        @param path:
        @return:
        '''
        d = self.path + '/.git'
        if osp.isdir(d):
            if osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs')):
                headref = osp.join(d, 'HEAD')
                return osp.isfile(headref) or \
                       (osp.islink(headref) and
                        os.readlink(headref).startswith('refs'))
            elif (osp.isfile(osp.join(d, 'gitdir')) and
                  osp.isfile(osp.join(d, 'commondir')) and
                  osp.isfile(osp.join(d, 'gitfile'))):
                return False
        return False

    def init(self, url):
        # Create the directory
        if not os.path.exists(self.path):
            os.makedirs(self.path)

        # git clone
        if self.is_git_dir():
            return self.pull()
        else:
            return self.clone(url)

    def clone(self, url):
        '''
        Check out the project
        @param branch:
        @param kwargs:
        @return:
        '''
        return PyRepo.clone_from(url, self.path)

    def pull(self):
        '''
        Update the project
        @param branch:
        @param kwargs:
        @return:
        '''
        repo = PyRepo(self.path)
        return repo.remote().pull()

    def checkout_2_branch(self, branch):
        PyRepo(self.path).git.checkout(branch)

    def checkout_2_commit(self, branch, commit):
        '''
        @todo not finished yet
        @param branch:
        @param commit:
        @return:
        '''
        PyRepo(self.path).git.checkout(branch)
        # PyRepo(self.path).head.set_reference(branch)
        # This call is problematic: it only resets, without checking out
        PyRepo(self.path).head.set_commit(commit)

    def checkout_2_tag(self, tag):
        PyRepo(self.path).git.checkout(tag)

    def branches(self):
        '''
        Get all branches
        @param branch:
        @param kwargs:
        @return:
        '''
        # Drop 'origin/HEAD ->', the pointer to the current remote HEAD
        # Strip the remote-name prefix
        branches = PyRepo(self.path).remote().refs
        # fixbug https://github.com/meolu/walle-web/issues/705
        return [str(branch).strip().lstrip('origin').lstrip('/') for branch in branches if
                not str(branch).strip().startswith('origin/HEAD')]

    def tags(self):
        '''
        Get all tags
        @param branch:
        @param kwargs:
        @return:
        '''
        return [str(tag) for tag in PyRepo(self.path).tags]

    def commits(self, branch):
        '''
        Get the commits of a branch
        @param branch:
        @param kwargs:
        @return:
        '''
        self.checkout_2_branch(branch)

        commit_log = PyGit.Git(self.path).log('--pretty=%h #@_@# %an #@_@# %s', max_count=50)
        commit_list = commit_log.split('\n')
        commits = []
        for commit in commit_list:
            if not re.search('^.+ #@_@# .+ #@_@# .*$', commit):
                continue

            commit_dict = commit.split(' #@_@# ')
            from flask import current_app
            current_app.logger.info(commit_dict)
            commits.append({
                'id': commit_dict[0],
                'name': commit_dict[1],
                'message': commit_dict[2],
            })

        return commits
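A minimal usage sketch for the `Repo` class above (illustrative, not part of the dataset row); the URL and local path are placeholders, and the class is assumed to be in scope. Note that `commits()` logs through `flask.current_app`, so it has to run inside a Flask application context.

# Sketch: clone (or update) a repository, then list recent commits on a branch.
from flask import Flask

app = Flask(__name__)
repo = Repo(path="/tmp/walle-demo")                   # local checkout path (placeholder)
repo.init("https://github.com/meolu/walle-web.git")   # clone on first run, pull afterwards
with app.app_context():                               # commits() uses flask.current_app.logger
    for c in repo.commits("master")[:5]:
        print(c["id"], c["name"], c["message"])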
quadprog
Input: Numpy arrays, the format follows the MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html
Output: Numpy array of the solution
from json import load
import os
import argparse
import random
from copy import deepcopy
import torchvision
import torchvision.transforms as transforms
from torch import nn
import sys
import torch
import numpy as np
import cvxopt

torch.manual_seed(0)

from fedlab.core.client.serial_trainer import SubsetSerialTrainer
from fedlab.utils.aggregator import Aggregators
from fedlab.utils.serialization import SerializationTool
from fedlab.utils.functional import evaluate
from fedlab.utils.functional import get_best_gpu, load_dict

sys.path.append("../")
from models.cnn import CNN_MNIST

# MASKED: quadprog function (lines 24-36)

def optim_lambdas(gradients, lambda0):
    epsilon = 0.5
    n = len(gradients)
    J_t = [grad.numpy() for grad in gradients]
    J_t = np.array(J_t)
    # target function
    Q = 2 * np.dot(J_t, J_t.T)
    q = np.array([[0] for i in range(n)])
    # equality constraint
    A = np.ones(n).T
    b = np.array([1])
    # boundary
    lb = np.array([max(0, lambda0[i] - epsilon) for i in range(n)])
    ub = np.array([min(1, lambda0[i] + epsilon) for i in range(n)])
    G = np.zeros((2 * n, n))
    for i in range(n):
        G[i][i] = -1
        G[n + i][i] = 1
    h = np.zeros((2 * n, 1))
    for i in range(n):
        h[i] = -lb[i]
        h[n + i] = ub[i]
    res = quadprog(Q, q, G, h, A, b)
    return res


# python standalone.py --sample_ratio 0.1 --batch_size 10 --epochs 5 --partition iid

# configuration
parser = argparse.ArgumentParser(description="Standalone training example")
parser.add_argument("--total_client", type=int, default=10)
parser.add_argument("--com_round", type=int, default=5)

parser.add_argument("--sample_ratio", type=float)
parser.add_argument("--batch_size", type=int)
parser.add_argument("--lr", type=float)
parser.add_argument("--epochs", type=int)

args = parser.parse_args()

# get raw dataset
root = "../datasets/mnist/"
trainset = torchvision.datasets.MNIST(root=root,
                                      train=True,
                                      download=True,
                                      transform=transforms.ToTensor())

testset = torchvision.datasets.MNIST(root=root,
                                     train=False,
                                     download=True,
                                     transform=transforms.ToTensor())

test_loader = torch.utils.data.DataLoader(testset,
                                          batch_size=len(testset),
                                          drop_last=False,
                                          shuffle=False)

# setup
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
gpu = get_best_gpu()
model = CNN_MNIST().cuda(gpu)

# FL settings
num_per_round = int(args.total_client * args.sample_ratio)
aggregator = Aggregators.fedavg_aggregate
total_client_num = args.total_client  # total number of clients
data_indices = load_dict("./mnist_noniid.pkl")

# fedlab setup
local_model = deepcopy(model)
trainer = SubsetSerialTrainer(model=local_model,
                              dataset=trainset,
                              data_slices=data_indices,
                              aggregator=aggregator,
                              args={
                                  "batch_size": args.batch_size,
                                  "epochs": args.epochs,
                                  "lr": args.lr
                              })

dynamic_lambdas = np.ones(num_per_round) * 1.0 / num_per_round

# train procedure
to_select = [i for i in range(total_client_num)]
for round in range(args.com_round):
    model_parameters = SerializationTool.serialize_model(model)
    selection = random.sample(to_select, num_per_round)
    parameters = trainer.train(model_parameters=model_parameters,
                               id_list=selection,
                               aggregate=False)
    gradients = [model_parameters - model for model in parameters]
    for i, grad in enumerate(gradients):
        gradients[i] = grad / grad.norm()
    print(len(gradients))
    print(gradients[0].shape)
    # calculate lambda
    lambda0 = [1.0 / num_per_round for _ in range(num_per_round)]
    dynamic_lambdas = torch.Tensor(optim_lambdas(gradients, lambda0)).view(-1)
    dt = Aggregators.fedavg_aggregate(gradients, dynamic_lambdas)
    serialized_parameters = model_parameters - dt * args.lr
    SerializationTool.deserialize_model(model, serialized_parameters)
criterion = nn.CrossEntropyLoss() loss, acc = evaluate(model, criterion, test_loader) print("loss: {:.4f}, acc: {:.2f}".format(loss, acc))
def quadprog(Q, q, G, h, A, b):
    """
    Input: Numpy arrays, the format follows MATLAB quadprog function:
    https://www.mathworks.com/help/optim/ug/quadprog.html
    Output: Numpy array of the solution
    """
    # cvxopt.matrix built from .tolist() treats each inner list as a column
    # (column-major), so the transposes in the qp() call below compensate.
    Q = cvxopt.matrix(Q.tolist())
    q = cvxopt.matrix(q.tolist(), tc='d')
    G = cvxopt.matrix(G.tolist())
    h = cvxopt.matrix(h.tolist())
    A = cvxopt.matrix(A.tolist())
    b = cvxopt.matrix(b.tolist(), tc='d')
    sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)
    return np.array(sol['x'])
24
36
from json import load import os import argparse import random from copy import deepcopy import torchvision import torchvision.transforms as transforms from torch import nn import sys import torch import numpy as np import cvxopt torch.manual_seed(0) from fedlab.core.client.serial_trainer import SubsetSerialTrainer from fedlab.utils.aggregator import Aggregators from fedlab.utils.serialization import SerializationTool from fedlab.utils.functional import evaluate from fedlab.utils.functional import get_best_gpu, load_dict sys.path.append("../") from models.cnn import CNN_MNIST def quadprog(Q, q, G, h, A, b): """ Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html Output: Numpy array of the solution """ Q = cvxopt.matrix(Q.tolist()) q = cvxopt.matrix(q.tolist(), tc='d') G = cvxopt.matrix(G.tolist()) h = cvxopt.matrix(h.tolist()) A = cvxopt.matrix(A.tolist()) b = cvxopt.matrix(b.tolist(), tc='d') sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b) return np.array(sol['x']) def optim_lambdas(gradients, lambda0): epsilon = 0.5 n = len(gradients) J_t = [grad.numpy() for grad in gradients] J_t = np.array(J_t) # target function Q = 2 * np.dot(J_t, J_t.T) q = np.array([[0] for i in range(n)]) # equality constrint A = np.ones(n).T b = np.array([1]) # boundary lb = np.array([max(0, lambda0[i] - epsilon) for i in range(n)]) ub = np.array([min(1, lambda0[i] + epsilon) for i in range(n)]) G = np.zeros((2 * n, n)) for i in range(n): G[i][i] = -1 G[n + i][i] = 1 h = np.zeros((2 * n, 1)) for i in range(n): h[i] = -lb[i] h[n + i] = ub[i] res = quadprog(Q, q, G, h, A, b) return res # python standalone.py --sample_ratio 0.1 --batch_size 10 --epochs 5 --partition iid # configuration parser = argparse.ArgumentParser(description="Standalone training example") parser.add_argument("--total_client", type=int, default=10) parser.add_argument("--com_round", type=int, default=5) parser.add_argument("--sample_ratio", type=float) parser.add_argument("--batch_size", type=int) parser.add_argument("--lr", type=float) parser.add_argument("--epochs", type=int) args = parser.parse_args() # get raw dataset root = "../datasets/mnist/" trainset = torchvision.datasets.MNIST(root=root, train=True, download=True, transform=transforms.ToTensor()) testset = torchvision.datasets.MNIST(root=root, train=False, download=True, transform=transforms.ToTensor()) test_loader = torch.utils.data.DataLoader(testset, batch_size=len(testset), drop_last=False, shuffle=False) # setup os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3" gpu = get_best_gpu() model = CNN_MNIST().cuda(gpu) # FL settings num_per_round = int(args.total_client * args.sample_ratio) aggregator = Aggregators.fedavg_aggregate total_client_num = args.total_client # client总数 data_indices = load_dict("./mnist_noniid.pkl") # fedlab setup local_model = deepcopy(model) trainer = SubsetSerialTrainer(model=local_model, dataset=trainset, data_slices=data_indices, aggregator=aggregator, args={ "batch_size": args.batch_size, "epochs": args.epochs, "lr": args.lr }) dynamic_lambdas = np.ones(num_per_round) * 1.0 / num_per_round # train procedure to_select = [i for i in range(total_client_num)] for round in range(args.com_round): model_parameters = SerializationTool.serialize_model(model) selection = random.sample(to_select, num_per_round) parameters = trainer.train(model_parameters=model_parameters, id_list=selection, aggregate=False) gradients = [model_parameters - model for model in parameters] for i, grad in 
enumerate(gradients): gradients[i] = grad / grad.norm() print(len(gradients)) print(gradients[0].shape) # calculate lamda lambda0 = [1.0 / num_per_round for _ in range(num_per_round)] dynamic_lambdas = torch.Tensor(optim_lambdas(gradients, lambda0)).view(-1) dt = Aggregators.fedavg_aggregate(gradients, dynamic_lambdas) serialized_parameters = model_parameters - dt * args.lr SerializationTool.deserialize_model(model, serialized_parameters) criterion = nn.CrossEntropyLoss() loss, acc = evaluate(model, criterion, test_loader) print("loss: {:.4f}, acc: {:.2f}".format(loss, acc))
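To make the quadprog/optim_lambdas pair above concrete, here is a minimal smoke test, a sketch only: the two fake 3-dimensional client "gradients" and their values are made up for illustration, and it assumes numpy, torch and cvxopt are installed and that quadprog and optim_lambdas from the script above are in scope (e.g. run inside the same module).

# Hypothetical smoke test for the QP-based lambda optimization above.
import numpy as np
import torch
import cvxopt

cvxopt.solvers.options['show_progress'] = False  # keep the solver quiet

# Two orthogonal unit "gradients" make the QP easy to check by hand.
fake_gradients = [torch.tensor([1.0, 0.0, 0.0]), torch.tensor([0.0, 1.0, 0.0])]
lambda0 = [0.5, 0.5]

lambdas = optim_lambdas(fake_gradients, lambda0)
print(lambdas.ravel())  # each weight stays inside its [lb, ub] box, here close to [0.5, 0.5]
print(lambdas.sum())    # numerically ~1 because of the equality constraint A x = b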
refine_dict
Clean dictionary based on frequency and gap of frequency.
For example,
{'s1': {'t1': 999, 't2': 199, 't3': 1}, 's2': {'m1': 2000, 'm2': 100}}
=> {'s1': {'t1': 999, 't2': 199}, 's2': {'m1': 2000}}
Args:
    full_mapping:
    clean_dict_filename:
    threshold:
    ignore_gap:
Returns:
import argparse import json import os from collections import Counter, defaultdict from helper import _is_token_alnum THRESHOLD = 0.01 GAP = 10 def get_full_mapping(src_filename, trg_filename, align_filename, mapping_filename, reverse_src2trg=False, lowercase=True): """ Get full mapping give align. Args: src_filename: trg_filename: align_filename: mapping_filename: reverse_src2trg: lowercase: Returns: """ print('src: {}, trg: {}, align: {}, mapping: {}, reverse: {}'.format( src_filename, trg_filename, align_filename, mapping_filename, reverse_src2trg)) src2trg_mapping = defaultdict(lambda: defaultdict(int)) processed_line = 0 with open(src_filename) as fs, open(trg_filename) as ft, open( align_filename) as fa: for ls, lt, la in zip(fs, ft, fa): if lowercase: ls = ls.lower() lt = lt.lower() processed_line += 1 ls_words = ls.split() lt_words = lt.split() la_aligns = la.split() src_pos_counter = Counter() trg_pos_counter = Counter() valid_src_pos = set() valid_trg_pos = set() for align in la_aligns: # only consider one-to-one mapping src_pos, trg_pos = align.split('-') src_pos = int(src_pos) trg_pos = int(trg_pos) # only consider alpha number token if _is_token_alnum(ls_words[src_pos]): src_pos_counter[src_pos] += 1 if _is_token_alnum(lt_words[trg_pos]): trg_pos_counter[trg_pos] += 1 # ignore token that aligned twice for pos, c in src_pos_counter.items(): if c == 1: valid_src_pos.add(pos) for pos, c in trg_pos_counter.items(): if c == 1: valid_trg_pos.add(pos) for align in la_aligns: src_pos, trg_pos = align.split('-') src_pos = int(src_pos) trg_pos = int(trg_pos) if _is_token_alnum(ls_words[src_pos]) and _is_token_alnum( lt_words[trg_pos]) and (src_pos in valid_src_pos) and ( trg_pos in valid_trg_pos): if reverse_src2trg: src2trg_mapping[lt_words[trg_pos]][ ls_words[src_pos]] += 1 else: src2trg_mapping[ls_words[src_pos]][ lt_words[trg_pos]] += 1 if processed_line % 1000000 == 0: print('{} done.'.format(processed_line)) with open(mapping_filename, 'w') as fw: print('dump to {} ...'.format(mapping_filename)) json.dump(src2trg_mapping, fw) return src2trg_mapping # MASKED: refine_dict function (lines 92-134) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Process alignments and do filter') parser.add_argument('--src_filename', help='Origin src file name before bsp', type=str, required=True) parser.add_argument('--trg_filename', help='Origin trg file name before bsp', type=str, required=True) parser.add_argument('--align_filename', help='align file name by atools', type=str, required=True) parser.add_argument('--dict_filename', help='clean dict file name', type=str, required=True) parser.add_argument('--threshold', help='threshold of ignore frequency', type=float, default=THRESHOLD) parser.add_argument('--ignore_gap', help='gap of ignore frequency', type=float, default=GAP) parser.add_argument( '--overwrite', dest='overwrite', action='store_true', help='Overwrite existing output files') args = parser.parse_args() if args.overwrite: print('Overwrite existing file') src2trg_mapping_filename = '{}.{}'.format(args.align_filename, 'src2trg_mapping') trg2src_mapping_filename = '{}.{}'.format(args.align_filename, 'trg2src_mapping') if os.path.isfile(src2trg_mapping_filename) and (not args.overwrite): print('loading mapping: {}'.format(src2trg_mapping_filename)) with open(src2trg_mapping_filename) as f: full_src2trg_mapping = json.load(f) else: print('creating mapping: {}'.format(src2trg_mapping_filename)) full_src2trg_mapping = get_full_mapping(args.src_filename, 
args.trg_filename, args.align_filename, src2trg_mapping_filename, False) if os.path.isfile(trg2src_mapping_filename) and (not args.overwrite): print('loading mapping: {}'.format(trg2src_mapping_filename)) with open(trg2src_mapping_filename) as f: full_trg2src_mapping = json.load(f) else: print('creating mapping: {}'.format(trg2src_mapping_filename)) full_trg2src_mapping = get_full_mapping(args.src_filename, args.trg_filename, args.align_filename, trg2src_mapping_filename, True) src2trg_clean_dict_filename = '{}.{}'.format(args.dict_filename, 'src2trg') refine_dict(full_src2trg_mapping, src2trg_clean_dict_filename, args.threshold, args.ignore_gap) trg2src_clean_dict_filename = '{}.{}'.format(args.dict_filename, 'trg2src') refine_dict(full_trg2src_mapping, trg2src_clean_dict_filename, args.threshold, args.ignore_gap)
def refine_dict(full_mapping, clean_dict_filename, threshold, ignore_gap):
    """
    Clean dictionary based on frequency and gap of frequency.
    For example,
    {'s1': {'t1': 999, 't2': 199, 't3': 1}, 's2': {'m1': 2000, 'm2': 100}}
    => {'s1': {'t1': 999, 't2': 199}, 's2': {'m1': 2000}}
    Args:
        full_mapping:
        clean_dict_filename:
        threshold:
        ignore_gap:

    Returns:

    """
    print('Refine dict to {}, threshold: {}, ignore_gap: {} ...'.format(
        clean_dict_filename, threshold, ignore_gap))
    full_mapping = sorted(
        full_mapping.items(), key=lambda x: sum(x[1].values()), reverse=True)
    with open(clean_dict_filename, 'w') as fw:
        for idx, src2trg in enumerate(full_mapping):
            src = src2trg[0]
            trg = sorted(src2trg[1].items(), key=lambda x: x[1], reverse=True)
            total_count = sum(c[1] for c in trg)
            clean_trg = dict()
            p = trg[0][1]
            for w, c in trg:
                if c / total_count < threshold:
                    # too rare
                    break
                if (p / c > ignore_gap) and (c / total_count < THRESHOLD * 5):
                    # large gap
                    break
                p = c
                clean_trg.update({w: round(c / total_count, 3)})
            fw.write('{}\n'.format(
                json.dumps({src: clean_trg}, ensure_ascii=False)))
92
134
import argparse import json import os from collections import Counter, defaultdict from helper import _is_token_alnum THRESHOLD = 0.01 GAP = 10 def get_full_mapping(src_filename, trg_filename, align_filename, mapping_filename, reverse_src2trg=False, lowercase=True): """ Get full mapping give align. Args: src_filename: trg_filename: align_filename: mapping_filename: reverse_src2trg: lowercase: Returns: """ print('src: {}, trg: {}, align: {}, mapping: {}, reverse: {}'.format( src_filename, trg_filename, align_filename, mapping_filename, reverse_src2trg)) src2trg_mapping = defaultdict(lambda: defaultdict(int)) processed_line = 0 with open(src_filename) as fs, open(trg_filename) as ft, open( align_filename) as fa: for ls, lt, la in zip(fs, ft, fa): if lowercase: ls = ls.lower() lt = lt.lower() processed_line += 1 ls_words = ls.split() lt_words = lt.split() la_aligns = la.split() src_pos_counter = Counter() trg_pos_counter = Counter() valid_src_pos = set() valid_trg_pos = set() for align in la_aligns: # only consider one-to-one mapping src_pos, trg_pos = align.split('-') src_pos = int(src_pos) trg_pos = int(trg_pos) # only consider alpha number token if _is_token_alnum(ls_words[src_pos]): src_pos_counter[src_pos] += 1 if _is_token_alnum(lt_words[trg_pos]): trg_pos_counter[trg_pos] += 1 # ignore token that aligned twice for pos, c in src_pos_counter.items(): if c == 1: valid_src_pos.add(pos) for pos, c in trg_pos_counter.items(): if c == 1: valid_trg_pos.add(pos) for align in la_aligns: src_pos, trg_pos = align.split('-') src_pos = int(src_pos) trg_pos = int(trg_pos) if _is_token_alnum(ls_words[src_pos]) and _is_token_alnum( lt_words[trg_pos]) and (src_pos in valid_src_pos) and ( trg_pos in valid_trg_pos): if reverse_src2trg: src2trg_mapping[lt_words[trg_pos]][ ls_words[src_pos]] += 1 else: src2trg_mapping[ls_words[src_pos]][ lt_words[trg_pos]] += 1 if processed_line % 1000000 == 0: print('{} done.'.format(processed_line)) with open(mapping_filename, 'w') as fw: print('dump to {} ...'.format(mapping_filename)) json.dump(src2trg_mapping, fw) return src2trg_mapping def refine_dict(full_mapping, clean_dict_filename, threshold, ignore_gap): """ Clean dictionary based on frequency and gap of frequency. 
For example, {'s1': ['t1': 999, 't2': 199, 't3':1], 's2': ['m1': 2000, 'm2': 100]} => {'s1': ['t1': 999, 't2': 199], 's2': ['m1': 2000]} Args: full_mapping: clean_dict_filename: threshold: ignore_gap: Returns: """ print('Refine dict to {}, threshold: {}, ignore_gap: {} ...'.format( clean_dict_filename, threshold, ignore_gap)) full_mapping = sorted( full_mapping.items(), key=lambda x: sum(x[1].values()), reverse=True) with open(clean_dict_filename, 'w') as fw: for idx, src2trg in enumerate(full_mapping): src = src2trg[0] trg = sorted(src2trg[1].items(), key=lambda x: x[1], reverse=True) total_count = sum(c[1] for c in trg) clean_trg = dict() p = trg[0][1] for w, c in trg: if c / total_count < threshold: # too rare break if (p / c > ignore_gap) and (c / total_count < THRESHOLD * 5): # large gap break p = c clean_trg.update({w: round(c / total_count, 3)}) fw.write('{}\n'.format(json.dumps({src: clean_trg}, ensure_ascii=False))) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Process alignments and do filter') parser.add_argument('--src_filename', help='Origin src file name before bsp', type=str, required=True) parser.add_argument('--trg_filename', help='Origin trg file name before bsp', type=str, required=True) parser.add_argument('--align_filename', help='align file name by atools', type=str, required=True) parser.add_argument('--dict_filename', help='clean dict file name', type=str, required=True) parser.add_argument('--threshold', help='threshold of ignore frequency', type=float, default=THRESHOLD) parser.add_argument('--ignore_gap', help='gap of ignore frequency', type=float, default=GAP) parser.add_argument( '--overwrite', dest='overwrite', action='store_true', help='Overwrite existing output files') args = parser.parse_args() if args.overwrite: print('Overwrite existing file') src2trg_mapping_filename = '{}.{}'.format(args.align_filename, 'src2trg_mapping') trg2src_mapping_filename = '{}.{}'.format(args.align_filename, 'trg2src_mapping') if os.path.isfile(src2trg_mapping_filename) and (not args.overwrite): print('loading mapping: {}'.format(src2trg_mapping_filename)) with open(src2trg_mapping_filename) as f: full_src2trg_mapping = json.load(f) else: print('creating mapping: {}'.format(src2trg_mapping_filename)) full_src2trg_mapping = get_full_mapping(args.src_filename, args.trg_filename, args.align_filename, src2trg_mapping_filename, False) if os.path.isfile(trg2src_mapping_filename) and (not args.overwrite): print('loading mapping: {}'.format(trg2src_mapping_filename)) with open(trg2src_mapping_filename) as f: full_trg2src_mapping = json.load(f) else: print('creating mapping: {}'.format(trg2src_mapping_filename)) full_trg2src_mapping = get_full_mapping(args.src_filename, args.trg_filename, args.align_filename, trg2src_mapping_filename, True) src2trg_clean_dict_filename = '{}.{}'.format(args.dict_filename, 'src2trg') refine_dict(full_src2trg_mapping, src2trg_clean_dict_filename, args.threshold, args.ignore_gap) trg2src_clean_dict_filename = '{}.{}'.format(args.dict_filename, 'trg2src') refine_dict(full_trg2src_mapping, trg2src_clean_dict_filename, args.threshold, args.ignore_gap)
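A hypothetical, minimal run of refine_dict follows; the toy counts mirror the docstring example, the output path is made up, and it assumes the function (plus the module-level THRESHOLD constant and json import it relies on) is in scope, e.g. by running inside the same script.

# Toy mapping: source token -> {candidate translation: alignment count}.
toy_mapping = {
    's1': {'t1': 999, 't2': 199, 't3': 1},
    's2': {'m1': 2000, 'm2': 100},
}
refine_dict(toy_mapping, '/tmp/toy.dict.src2trg', 0.01, 10)

# Each output line is a JSON object mapping a source token to its surviving
# translations with normalized frequencies. Entries are ordered by total count,
# so 's2' comes first, e.g.:
#   {"s2": {"m1": 0.952}}
#   {"s1": {"t1": 0.833, "t2": 0.166}}
with open('/tmp/toy.dict.src2trg') as f:
    for line in f:
        print(line.strip())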
generate_yaml_template
Args:
    base_yaml: A string representation of one type job's base yaml.
    slots_proto: A proto map object representation of modification template's
        operable smallest units.
Returns:
    string: A yaml_template
# Copyright 2020 The FedLearner Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding: utf-8 from flatten_dict import flatten from fedlearner_webconsole.proto.workflow_definition_pb2 import Slot from fedlearner_webconsole.workflow_template.template_validaor \ import YamlTemplate class _YamlTemplate(YamlTemplate): # Which placeholders in the template should be interpreted idpattern = r'Slot_[a-z0-9_]*' def substitute(self, mapping): return super()._substitute(mapping, fixed_placeholder=None, ignore_invalid=True) def format_yaml(yaml, **kwargs): """Formats a yaml template. Example usage: format_yaml('{"abc": ${x.y}}', x={'y': 123}) output should be '{"abc": 123}' """ template = _YamlTemplate(yaml) try: return template.substitute(flatten(kwargs or {}, reducer='dot')) except KeyError as e: raise RuntimeError( 'Unknown placeholder: {}'.format(e.args[0])) from e # MASKED: generate_yaml_template function (lines 48-63)
def generate_yaml_template(base_yaml, slots_proto):
    """
    Args:
        base_yaml: A string representation of one type job's base yaml.
        slots_proto: A proto map object representation of modification
            template's operable smallest units.

    Returns:
        string: A yaml_template
    """
    slots = {}
    for key in slots_proto:
        if slots_proto[key].reference_type == Slot.ReferenceType.DEFAULT:
            slots[key] = slots_proto[key].default
        else:
            slots[key] = f'${{{slots_proto[key].reference}}}'
    return format_yaml(base_yaml, **slots)
48
63
# Copyright 2020 The FedLearner Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding: utf-8 from flatten_dict import flatten from fedlearner_webconsole.proto.workflow_definition_pb2 import Slot from fedlearner_webconsole.workflow_template.template_validaor \ import YamlTemplate class _YamlTemplate(YamlTemplate): # Which placeholders in the template should be interpreted idpattern = r'Slot_[a-z0-9_]*' def substitute(self, mapping): return super()._substitute(mapping, fixed_placeholder=None, ignore_invalid=True) def format_yaml(yaml, **kwargs): """Formats a yaml template. Example usage: format_yaml('{"abc": ${x.y}}', x={'y': 123}) output should be '{"abc": 123}' """ template = _YamlTemplate(yaml) try: return template.substitute(flatten(kwargs or {}, reducer='dot')) except KeyError as e: raise RuntimeError( 'Unknown placeholder: {}'.format(e.args[0])) from e def generate_yaml_template(base_yaml, slots_proto): """ Args: base_yaml: A string representation of one type job's base yaml. slots_proto: A proto map object representation of modification template's operable smallest units. Returns: string: A yaml_template """ slots = {} for key in slots_proto: if slots_proto[key].reference_type == Slot.ReferenceType.DEFAULT: slots[key] = slots_proto[key].default else: slots[key] = f'${{{slots_proto[key].reference}}}' return format_yaml(base_yaml, **slots)
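The two functions above split the work: generate_yaml_template fills each Slot_* placeholder with either the slot's default value or a literal ${reference} string, and format_yaml then substitutes flattened, dot-joined kwargs into the result via the project's _YamlTemplate. As a rough standalone approximation of that substitution step (not the project's class), the same idea can be reproduced with the standard-library string.Template plus flatten_dict; DotTemplate and its relaxed idpattern are inventions for this sketch only, whereas the real template restricts placeholders to r'Slot_[a-z0-9_]*'.

# Rough standalone approximation of the dotted-placeholder substitution
# (assumption: the flatten_dict package is installed).
from string import Template
from flatten_dict import flatten

class DotTemplate(Template):
    # Allow dotted placeholder names such as ${x.y}.
    idpattern = r'[a-zA-Z_][a-zA-Z0-9_.]*'

yaml = '{"abc": ${x.y}}'
mapping = flatten({'x': {'y': 123}}, reducer='dot')  # {'x.y': 123}
print(DotTemplate(yaml).substitute(mapping))         # {"abc": 123}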
remote_shards
Returns a Dict[int, RRef] with keys being the RPC rank and values being RRefs to shards on that rank. Need to initialize the RPC framework for this functionality. Raises an exception if ShardedTensor was created with ``init_rrefs=False``
from dataclasses import dataclass, field from enum import Enum from typing import ( Callable, Dict, List, Optional, Union ) import weakref import threading import torch import torch.distributed as dist from torch.distributed import rpc from torch.distributed import distributed_c10d from torch.distributed._sharding_spec import ( ChunkShardingSpec, EnumerableShardingSpec, ShardMetadata, ShardingSpec, ) from torch.distributed._sharding_spec._internals import ( check_tensor, get_split_size, get_chunked_dim_size, validate_non_overlapping_shards_metadata, ) from torch.types import Number from .metadata import TensorProperties, ShardedTensorMetadata from .shard import Shard from .utils import ( get_current_process_group, _flatten_tensor_size, _parse_and_validate_remote_device, _validate_output_tensor_for_gather, build_metadata_from_local_shards, build_global_metadata ) # Tracking for sharded tensor objects. _sharded_tensor_lock = threading.Lock() _sharded_tensor_current_id = 0 _sharded_tensor_map: Dict[int, 'weakref.ReferenceType[ShardedTensor]'] = {} # Custom sharded ops _SHARDED_OPS: Dict[str, Callable] = {} def _register_sharded_op(op, func): from inspect import signature if len(signature(func).parameters) != 4: raise TypeError( f'Custom sharded op function expects signature: ' f'(types, args, kwargs, process_group), but received ' f'signature: {signature(func)}') global _SHARDED_OPS _SHARDED_OPS[op] = func def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int): with _sharded_tensor_lock: if sharded_tensor_id not in _sharded_tensor_map: raise RuntimeError( f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}') sharded_tensor = _sharded_tensor_map[sharded_tensor_id]() if sharded_tensor is None: raise RuntimeError('ShardedTensor weakref has been deallocated') else: sharded_tensor._register_remote_shards(rrefs, rpc_rank) class CreateOp(Enum): EMPTY = 0 FULL = 1 ONES = 2 RAND = 3 ZEROS = 4 @dataclass class TensorInitParams(object): """ Container for list of common params to create new local tensor. """ create_op: CreateOp # needed when create_op is FULL # default set to False (not None) since None is incompatible with Number. fill_value: Number = field(default=False) tensor_properties: TensorProperties = field( default=TensorProperties(dtype=torch.get_default_dtype(), layout=torch.strided, requires_grad=False, memory_format=torch.contiguous_format, pin_memory=False)) class ShardedTensor(object): """ ShardedTensor is an abstraction to represent Tensors that are sharded across multiple devices and multiple processes. ShardedTensor is initialized in an SPMD like fashion where each rank initializes the ShardedTensor. The ShardedTensor object on each rank then only stores the local shard for the Tensor and provides global metadata for all the shards. ShardedTensor doesn't provide any Tensor like operations but is a wrapper providing the Tensor representing the local shard and the global metadata. Using these, users can build their custom distributed sharded computations on top of this primitive. The local shards are all initialized using the create_op specified by tensor_init_params.create_op, e.g., torch.ones, or torch.empty Args: sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification describing how to shard the Tensor. size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. 
Keyword args: tensor_init_params (:class: `TensorInitParams`): common params to create tensor. init_rrefs (bool, optional): Whether or not to initialize :class:`torch.distributed.rpc.RRef`s pointing to remote shards. Need to initialize the RPC Framework if specified as ``True``. Default: ``False``. .. note:: ShardedTensor uses collectives to do various operations, i.e. it uses all_gather to do cross rank validations. For NCCL-based processed groups, internal tensor representations of objects must be moved to the GPU device before communication takes place. In this case, the device used is given by ``torch.cuda.current_device()`` and it is the user's responsiblity to ensure that this is set so that each rank has an individual GPU, via ``torch.cuda.set_device()`` """ def __new__(cls, *args, **kwargs): # Use __new__ for logging purposes. torch._C._log_api_usage_once("torch.distributed.sharded_tensor") return super(ShardedTensor, cls).__new__(cls) def __init__( self, sharding_spec: ShardingSpec, *size, tensor_init_params: TensorInitParams, process_group=None, init_rrefs=False, ): # prepare initialization, initialize fields like # _process_group, _local_shards, etc. self._prepare_init(process_group=process_group, init_rrefs=init_rrefs) if tensor_init_params.tensor_properties is None: raise ValueError('tensor_properties must not be None.') if tensor_init_params.tensor_properties.dtype is None: tensor_init_params.tensor_properties.dtype = torch.get_default_dtype() if tensor_init_params.tensor_properties.layout != torch.strided: raise ValueError('Only torch.strided layout is currently supported') if tensor_init_params.tensor_properties.memory_format != torch.contiguous_format: raise ValueError('Only torch.contiguous_format memory_format is currently supported') dims = _flatten_tensor_size(size) self._sharding_spec = sharding_spec if isinstance(self._sharding_spec, ChunkShardingSpec): self._init_chunked(dims, tensor_init_params) elif isinstance(self._sharding_spec, EnumerableShardingSpec): self._init_enumerable(dims, tensor_init_params) else: raise ValueError(f'Unsupported sharding_spec: {self._sharding_spec}') # do post initialization (i.e. register sharded_tensor_id, initialize_rpc) self._post_init() def _prepare_init(self, process_group=None, init_rrefs=False): self._init_rrefs = init_rrefs self._sharded_tensor_id = None self._process_group = ( process_group if process_group is not None else distributed_c10d._get_default_group() ) self._local_shards: List[Shard] = [] self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {} def _post_init(self): # Initialize RPC if available. if self._init_rrefs: with _sharded_tensor_lock: global _sharded_tensor_current_id, _sharded_tensor_map self._sharded_tensor_id = _sharded_tensor_current_id _sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self) _sharded_tensor_current_id += 1 if not rpc._is_current_rpc_agent_set(): raise RuntimeError( 'RPC Framework needs to be initialized using' ' torch.distributed.rpc.init_rpc if init_rrefs is set to True') self._init_rpc() def __del__(self): # Clean up the global map. with _sharded_tensor_lock: global _sharded_tensor_current_id, _sharded_tensor_map if self._sharded_tensor_id in _sharded_tensor_map: _sharded_tensor_map.pop(self._sharded_tensor_id) # type: ignore[call-overload] def _init_rpc(self): # Validate PG and RPC ranks match. 
pg_rank = dist.get_rank() rpc_rank = rpc.get_worker_info().id if pg_rank != rpc_rank: raise ValueError( f'Default ProcessGroup and RPC ranks must be ' f'the same for ShardedTensor, found process group rank: ' f'{pg_rank} and RPC rank: {rpc_rank}' ) self._remote_shards = {} # Gather all the sharded tensor ids. worker_infos = rpc._get_current_rpc_agent().get_worker_infos() rank_to_name = {} name_to_rank = {} for worker_info in worker_infos: rank_to_name[worker_info.id] = worker_info.name name_to_rank[worker_info.name] = worker_info.id all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id) # Share the local shards to the entire world. futs = [] rpc_rank = rpc.get_worker_info().id for rank in range(dist.get_world_size()): # Skip self. if rank == dist.get_rank(): continue if len(self.local_shards()) != 0: rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()] fut = rpc.rpc_async( rank, _register_remote_shards, args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank)) futs.append(fut) torch.futures.wait_all(futs) # Barrier for all RPCs to finish on all ranks. rpc.api._all_gather(None) def gather( self, dst: int = 0, out: Optional[torch.Tensor] = None, ) -> None: """ Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the sharded tensor. The API needs to be called on all ranks in SPMD fashion. All ranks should have the same ``dst``. ``out`` should be a tensor of the same size as the overall size of the sharded tensor on ``dst`` and ``None`` on all other ranks. Args: dst(int): The rank where full tensor is constructed. Default: 0 out (:class `torch.Tensor`, optional): The output full tensor. Must to be provided ONLY on ``dst`` rank. Default: ``None`` """ rank = dist.get_rank(self._process_group) full_size = self.metadata().size _validate_output_tensor_for_gather(rank, dst, full_size, out) local_shards = self.local_shards() world_size = dist.get_world_size(self._process_group) gathered_shards = [None] * world_size # will revise this part with CPU support and use dist.gather() # once NCCL support for gather() is ready # https://github.com/pytorch/pytorch/issues/66187 dist.all_gather_object( obj=local_shards, object_list=gathered_shards, group=self._process_group, ) if rank == dst: dims = len(full_size) for shards in gathered_shards: if shards is None: raise RuntimeError( 'Gathered shards cannot be None on dst rank {dst}' ) for shard in shards: metadata = shard.metadata tensor = shard.tensor out_narrow_view = out for dim in range(dims): out_narrow_view = out_narrow_view.narrow( dim, metadata.shard_offsets[dim], metadata.shard_sizes[dim], ) out_narrow_view.copy_(tensor) @classmethod def _init_from_local_shards( cls, local_shards: List[Shard], *global_size, process_group=None, init_rrefs=False, ): # STEP 1: Validate the Shardmetadatas locally process_group = ( process_group if process_group is not None else distributed_c10d._get_default_group() ) current_rank = dist.get_rank(process_group) world_size = dist.get_world_size(process_group) local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None global_tensor_size = _flatten_tensor_size(global_size) if len(local_shards) > 0: local_sharded_tensor_metadata = \ build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group) # STEP 2. 
Validate metadata across ranks, and build a global sharded tensor # metadata by gathering local ShardedTensorMetadata gathered_metadatas: List[Optional[ShardedTensorMetadata]] = [] if world_size > 1: gathered_metadatas = [None for _ in range(world_size)] dist.all_gather_object( gathered_metadatas, local_sharded_tensor_metadata, group=process_group ) else: gathered_metadatas = [local_sharded_tensor_metadata] global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas) # STEP 3: Validation done, create the actual ShardedTensor and populate fields # prepare initialization sharded_tensor = cls.__new__(cls) sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs) # add to metadata and local_shards sharded_tensor._metadata = global_sharded_tensor_metadata sharded_tensor._local_shards = local_shards # make a EnumerableShardingSpec for sharded tensors that initialized from this API. # TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list. # see issue https://github.com/pytorch/pytorch/issues/67244 sharded_tensor._sharding_spec = EnumerableShardingSpec(global_sharded_tensor_metadata.shards_metadata) # run post initialization, i.e. map registration, rpc initialization sharded_tensor._post_init() return sharded_tensor @classmethod def _init_from_local_shards_and_global_metadata( cls, local_shards: List[Shard], sharded_tensor_metadata: ShardedTensorMetadata, process_group=None, init_rrefs=False, ) -> "ShardedTensor": """ Initialize a ShardedTensor with local shards and a global ShardedTensorMetadata built on each rank. Warning: This API is experimental and subject to change. It does not do cross rank validations, and fully rely on the user for the correctness of sharded_tensor_metadata on each rank """ process_group = ( process_group if process_group is not None else distributed_c10d._get_default_group() ) current_rank = dist.get_rank(process_group) shards_metadata = sharded_tensor_metadata.shards_metadata tensor_properties = sharded_tensor_metadata.tensor_properties if len(shards_metadata) == 0: raise ValueError("shards_metadata must not be empty!") if tensor_properties.layout != torch.strided: raise ValueError('Only torch.strided layout is currently supported') sharded_tensor = cls.__new__(cls) sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs) sharded_tensor._metadata = sharded_tensor_metadata local_shard_metadatas = [] def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False): tensor_property_or_metadata = "tensor property" if is_property else "local ShardMetadata" if expected != actual: raise ValueError(f"Local shards' tensor {prop_name} property is incompatible with " f"{tensor_property_or_metadata} on rank {rank}: " f"{tensor_property_or_metadata} {prop_name}={expected}, " f"local shard tensor {prop_name}={actual}.") # collect local shard metadatas from the global sharded_tensor_metadata for shard_metadata in shards_metadata: # type: ignore[attr-defined] rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_metadata.placement) if current_rank == rank: local_shard_metadatas.append(shard_metadata) if len(local_shards) != len(local_shard_metadatas): raise RuntimeError( f'Number of local shards ({len(local_shards)}) does not match number of local ' f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) ' f'on rank ({current_rank}) ' ) for shard in local_shards: shard_meta = shard.metadata local_shard_tensor = shard.tensor rank, 
local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_meta.placement) # validate if shard_meta in the metadatas collected from sharded_tensor_metadata assert shard_meta in local_shard_metadatas, \ "local shard metadata not in sharded_tensor_metadata!" _raise_if_mismatch(tensor_properties.layout, local_shard_tensor.layout, "layout", current_rank, True) if not local_shard_tensor.is_contiguous(): raise ValueError('Only torch.contiguous_format memory_format is currently supported') _raise_if_mismatch(shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank) _raise_if_mismatch(tensor_properties.pin_memory, local_shard_tensor.is_pinned(), "pin_memory", current_rank, True) _raise_if_mismatch(local_device, local_shard_tensor.device, "device", current_rank) _raise_if_mismatch(tensor_properties.dtype, local_shard_tensor.dtype, "dtype", current_rank, True) _raise_if_mismatch( tensor_properties.requires_grad, local_shard_tensor.requires_grad, "requires_grad", current_rank, True) # check if shards_metadata have overlap shards validate_non_overlapping_shards_metadata(shards_metadata) # check if the shards_metadata is compatible with overall size of the sharded tensor. check_tensor(shards_metadata, list(sharded_tensor_metadata.size)) # done validation, add local_shards sharded_tensor._local_shards = local_shards # make a EnumerableShardingSpec for sharded tensors that initialized from this API. # TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list. # see issue https://github.com/pytorch/pytorch/issues/67244 sharded_tensor._sharding_spec = EnumerableShardingSpec(shards_metadata) # run post initialization, i.e. map registration, rpc initialization sharded_tensor._post_init() return sharded_tensor def _init_chunked(self, dims, tensor_init_params: TensorInitParams, ): current_rank = dist.get_rank(self._process_group) sharding_dim = self._sharding_spec.dim # type: ignore[attr-defined] # Validate the sharding spec. if not isinstance(sharding_dim, int): raise ValueError( f"Sharding dim needs to be an integer, found: {sharding_dim}" ) if sharding_dim >= len(dims) or sharding_dim < -len(dims): raise ValueError(f"Invalid sharding dim: {sharding_dim}") dim_size = dims[sharding_dim] remote_devices = self._sharding_spec.placements # type: ignore[attr-defined] chunks = len(remote_devices) # split_size computed similar to 'torch.chunk' split_size = get_split_size(dim_size, chunks) shards_metadata = [] for idx, remote_device in enumerate(remote_devices): rank, local_device = _parse_and_validate_remote_device(self._process_group, remote_device) # Adjust the sharding dim for this rank. sharded_dim_size = get_chunked_dim_size(dim_size, split_size, idx) if sharded_dim_size > 0: # Build sharding_metadata. # deepcopy for modification. rank_dims = dims.copy() rank_offsets = [0] * len(dims) rank_offsets[sharding_dim] = split_size * idx rank_dims[sharding_dim] = sharded_dim_size shard_metadata = ShardMetadata(rank_offsets, rank_dims, remote_device) shards_metadata.append(shard_metadata) # Build the local shard for the current rank if it is involved in the sharding spec. if current_rank == rank: # Initialize the local shard. 
local_shard = _create_tensor_from_params( *rank_dims, local_device=local_device, tensor_init_params=tensor_init_params) self._local_shards.append(Shard(local_shard, shard_metadata)) # Build overall metadata self._metadata = ShardedTensorMetadata( shards_metadata, dims, tensor_init_params.tensor_properties, ) def _init_enumerable(self, dims, tensor_init_params: TensorInitParams): # Validate the sharding spec is compatible with the tensor. check_tensor(self._sharding_spec.shards, dims) # type: ignore[attr-defined] current_rank = dist.get_rank(self._process_group) shards_metadata = [] for shard_metadata in self._sharding_spec.shards: # type: ignore[attr-defined] rank, local_device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement) shards_metadata.append(shard_metadata) if current_rank == rank: # Initialize the local shard. local_shard = _create_tensor_from_params( *shard_metadata.shard_sizes, local_device=local_device, tensor_init_params=tensor_init_params) self._local_shards.append(Shard(local_shard, shard_metadata)) # Build overall metadata self._metadata = ShardedTensorMetadata( shards_metadata, dims, tensor_init_params.tensor_properties, ) def sharding_spec(self) -> ShardingSpec: """ Returns the ShardingSpec for the tensor. """ return self._sharding_spec def __torch_function__(self, func, types, args=(), kwargs=None): if func in _SHARDED_OPS: return _SHARDED_OPS[func](types, args, kwargs, self._process_group) raise RuntimeError( f"torch function '{func.__name__}', with args: {args} and " f"kwargs: {kwargs} not supported for ShardedTensor!") def metadata(self) -> ShardedTensorMetadata: """ Returns a :class:`ShardedTensorMetadata` object corresponding to the metadata for the entire tensor. """ return self._metadata def local_shards(self) -> List[Shard]: """ Returns a list of :class:`Shard' corresponding to the local shards for this rank. Returns an empty list if the current rank does not host any shards for this Tensor. """ return self._local_shards def size(self, dim: int = None) -> Union[torch.Size, int]: """ Returns a :Union:`[torch.Size, int]` which represents the size of the tensor. The dimension can be specified. Args: dim (int, optional): the dimension over which the size represents. If specified, it returns the size of the given dimension. If not, it returns a subclass of tuple. Default: ``None`` Returns: A :Union:`[torch.Size, int]` represents the size of the tensor. """ size = self._metadata.size if dim is None: return size if dim < 0 or dim >= len(size): raise ValueError( f"Argument ``dim`` must be within the range of tensor dimensions [0, {len(size)})" ) return size[dim] def is_pinned(self) -> bool: """ Returns True if the sharded tensor (each local shard) resides in pinned memory. """ return self._metadata.tensor_properties.pin_memory def is_contiguous(self) -> bool: """ Returns True if the sharded tensor (each local shard) is contiguous in memory in the order specified by memory format. 
""" return self._metadata.tensor_properties.memory_format == torch.contiguous_format @property def shape(self): return self._metadata.size @property def requires_grad(self): return self._metadata.tensor_properties.requires_grad @property def dtype(self): return self._metadata.tensor_properties.dtype @property def layout(self): return self._metadata.tensor_properties.layout def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int): self._remote_shards[rpc_rank] = remote_shards # MASKED: remote_shards function (lines 632-644) def __hash__(self): return id(self) def __repr__(self): return f'ShardedTensor({self._metadata})' @dataclass class ProcessGroupState: """ State for ser-de of process group """ local_rank: int global_rank: int local_world_size: int global_world_size: int def __getstate__(self): pg_state = ShardedTensor.ProcessGroupState( distributed_c10d.get_rank(self._process_group), distributed_c10d.get_rank(), distributed_c10d.get_world_size(self._process_group), distributed_c10d.get_world_size(), ) return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs def __setstate__(self, state): self._sharded_tensor_id = None if not distributed_c10d.is_initialized(): raise RuntimeError( 'Need to initialize default process group using ' '"init_process_group" before loading ShardedTensor') self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state # Setup process group self._process_group = get_current_process_group() # Validate process group. local_rank = distributed_c10d.get_rank(self._process_group) if pg_state.local_rank != local_rank: raise RuntimeError( f'Local rank at save time was {pg_state.local_rank}, but at ' f'load time was {local_rank}') global_rank = distributed_c10d.get_rank() if pg_state.global_rank != global_rank: raise RuntimeError( f'Global rank at save time was {pg_state.global_rank}, but at ' f'load time was {global_rank}') local_world_size = distributed_c10d.get_world_size(self._process_group) if pg_state.local_world_size != local_world_size: raise RuntimeError( f'Local world size at save time was {pg_state.local_world_size}, ' f'but at load time was {local_world_size}') global_world_size = distributed_c10d.get_world_size() if pg_state.global_world_size != global_world_size: raise RuntimeError( f'Global world size at save time was {pg_state.global_world_size}, ' f'but at load time was {global_world_size}') self._post_init() def _create_tensor_from_params(*size, local_device, tensor_init_params: TensorInitParams): """ Helper to construct tensor from size, device and common params. 
""" create_op = tensor_init_params.create_op dtype = tensor_init_params.tensor_properties.dtype layout = tensor_init_params.tensor_properties.layout requires_grad = tensor_init_params.tensor_properties.requires_grad memory_format = tensor_init_params.tensor_properties.memory_format pin_memory = tensor_init_params.tensor_properties.pin_memory if create_op == CreateOp.ONES: return torch.ones(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad,) elif create_op == CreateOp.EMPTY: return torch.empty(*size, dtype=dtype, layout=layout, device=local_device, requires_grad=requires_grad, # NB: memory_format param is not accepted by torch.ones memory_format=memory_format, pin_memory=pin_memory,) elif tensor_init_params.create_op == CreateOp.ZEROS: return torch.zeros(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad,) elif tensor_init_params.create_op == CreateOp.RAND: return torch.rand(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad,) elif tensor_init_params.create_op == CreateOp.FULL: return torch.full(size=size, fill_value=tensor_init_params.fill_value, layout=layout, dtype=dtype, requires_grad=requires_grad, device=local_device, ) else: raise ValueError(f'Unsupported create_op: {tensor_init_params.create_op}')
def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]:
    """
    Returns a Dict[int, RRef] with keys being the RPC rank
    and values being RRefs to shards on that rank.

    Need to initialize the RPC framework for this functionality.

    Raises an exception if ShardedTensor was created with ``init_rrefs=False``
    """
    if not self._init_rrefs:
        raise RuntimeError(
            'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available'
        )
    return self._remote_shards
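Because this accessor only returns anything useful when the tensor was built with init_rrefs=True, here is a heavily hedged per-rank sketch of how it would be exercised. It is not runnable as a single process: it must be launched once per rank (e.g. via torchrun or multiprocessing), the import path for ShardedTensor/TensorInitParams/CreateOp is assumed to match this snapshot of the experimental API, and MASTER_ADDR/MASTER_PORT are assumed to be set for rendezvous.

# Hypothetical per-rank sketch; names and module paths are assumptions.
import torch.distributed as dist
import torch.distributed.rpc as rpc
from torch.distributed._sharding_spec import ChunkShardingSpec
from torch.distributed._sharded_tensor.api import (  # assumed location of the file above
    CreateOp, ShardedTensor, TensorInitParams,
)

def build_and_inspect(rank: int, world_size: int) -> None:
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    # remote_shards() requires the RPC framework, hence init_rrefs=True below.
    rpc.init_rpc(f"worker{rank}", rank=rank, world_size=world_size)

    spec = ChunkShardingSpec(
        dim=0,
        placements=[f"rank:{r}/cpu" for r in range(world_size)],
    )
    st = ShardedTensor(
        spec, 8, 4,
        tensor_init_params=TensorInitParams(create_op=CreateOp.ONES),
        init_rrefs=True,
    )

    # Keys are peer RPC ranks; values are RRefs to the Shard objects those ranks host.
    for peer_rank, rrefs in st.remote_shards().items():
        print(rank, "->", peer_rank, [rref.owner_name() for rref in rrefs])

    rpc.shutdown()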
632
644
from dataclasses import dataclass, field from enum import Enum from typing import ( Callable, Dict, List, Optional, Union ) import weakref import threading import torch import torch.distributed as dist from torch.distributed import rpc from torch.distributed import distributed_c10d from torch.distributed._sharding_spec import ( ChunkShardingSpec, EnumerableShardingSpec, ShardMetadata, ShardingSpec, ) from torch.distributed._sharding_spec._internals import ( check_tensor, get_split_size, get_chunked_dim_size, validate_non_overlapping_shards_metadata, ) from torch.types import Number from .metadata import TensorProperties, ShardedTensorMetadata from .shard import Shard from .utils import ( get_current_process_group, _flatten_tensor_size, _parse_and_validate_remote_device, _validate_output_tensor_for_gather, build_metadata_from_local_shards, build_global_metadata ) # Tracking for sharded tensor objects. _sharded_tensor_lock = threading.Lock() _sharded_tensor_current_id = 0 _sharded_tensor_map: Dict[int, 'weakref.ReferenceType[ShardedTensor]'] = {} # Custom sharded ops _SHARDED_OPS: Dict[str, Callable] = {} def _register_sharded_op(op, func): from inspect import signature if len(signature(func).parameters) != 4: raise TypeError( f'Custom sharded op function expects signature: ' f'(types, args, kwargs, process_group), but received ' f'signature: {signature(func)}') global _SHARDED_OPS _SHARDED_OPS[op] = func def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int): with _sharded_tensor_lock: if sharded_tensor_id not in _sharded_tensor_map: raise RuntimeError( f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}') sharded_tensor = _sharded_tensor_map[sharded_tensor_id]() if sharded_tensor is None: raise RuntimeError('ShardedTensor weakref has been deallocated') else: sharded_tensor._register_remote_shards(rrefs, rpc_rank) class CreateOp(Enum): EMPTY = 0 FULL = 1 ONES = 2 RAND = 3 ZEROS = 4 @dataclass class TensorInitParams(object): """ Container for list of common params to create new local tensor. """ create_op: CreateOp # needed when create_op is FULL # default set to False (not None) since None is incompatible with Number. fill_value: Number = field(default=False) tensor_properties: TensorProperties = field( default=TensorProperties(dtype=torch.get_default_dtype(), layout=torch.strided, requires_grad=False, memory_format=torch.contiguous_format, pin_memory=False)) class ShardedTensor(object): """ ShardedTensor is an abstraction to represent Tensors that are sharded across multiple devices and multiple processes. ShardedTensor is initialized in an SPMD like fashion where each rank initializes the ShardedTensor. The ShardedTensor object on each rank then only stores the local shard for the Tensor and provides global metadata for all the shards. ShardedTensor doesn't provide any Tensor like operations but is a wrapper providing the Tensor representing the local shard and the global metadata. Using these, users can build their custom distributed sharded computations on top of this primitive. The local shards are all initialized using the create_op specified by tensor_init_params.create_op, e.g., torch.ones, or torch.empty Args: sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification describing how to shard the Tensor. size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. 
Keyword args: tensor_init_params (:class: `TensorInitParams`): common params to create tensor. init_rrefs (bool, optional): Whether or not to initialize :class:`torch.distributed.rpc.RRef`s pointing to remote shards. Need to initialize the RPC Framework if specified as ``True``. Default: ``False``. .. note:: ShardedTensor uses collectives to do various operations, i.e. it uses all_gather to do cross rank validations. For NCCL-based processed groups, internal tensor representations of objects must be moved to the GPU device before communication takes place. In this case, the device used is given by ``torch.cuda.current_device()`` and it is the user's responsiblity to ensure that this is set so that each rank has an individual GPU, via ``torch.cuda.set_device()`` """ def __new__(cls, *args, **kwargs): # Use __new__ for logging purposes. torch._C._log_api_usage_once("torch.distributed.sharded_tensor") return super(ShardedTensor, cls).__new__(cls) def __init__( self, sharding_spec: ShardingSpec, *size, tensor_init_params: TensorInitParams, process_group=None, init_rrefs=False, ): # prepare initialization, initialize fields like # _process_group, _local_shards, etc. self._prepare_init(process_group=process_group, init_rrefs=init_rrefs) if tensor_init_params.tensor_properties is None: raise ValueError('tensor_properties must not be None.') if tensor_init_params.tensor_properties.dtype is None: tensor_init_params.tensor_properties.dtype = torch.get_default_dtype() if tensor_init_params.tensor_properties.layout != torch.strided: raise ValueError('Only torch.strided layout is currently supported') if tensor_init_params.tensor_properties.memory_format != torch.contiguous_format: raise ValueError('Only torch.contiguous_format memory_format is currently supported') dims = _flatten_tensor_size(size) self._sharding_spec = sharding_spec if isinstance(self._sharding_spec, ChunkShardingSpec): self._init_chunked(dims, tensor_init_params) elif isinstance(self._sharding_spec, EnumerableShardingSpec): self._init_enumerable(dims, tensor_init_params) else: raise ValueError(f'Unsupported sharding_spec: {self._sharding_spec}') # do post initialization (i.e. register sharded_tensor_id, initialize_rpc) self._post_init() def _prepare_init(self, process_group=None, init_rrefs=False): self._init_rrefs = init_rrefs self._sharded_tensor_id = None self._process_group = ( process_group if process_group is not None else distributed_c10d._get_default_group() ) self._local_shards: List[Shard] = [] self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {} def _post_init(self): # Initialize RPC if available. if self._init_rrefs: with _sharded_tensor_lock: global _sharded_tensor_current_id, _sharded_tensor_map self._sharded_tensor_id = _sharded_tensor_current_id _sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self) _sharded_tensor_current_id += 1 if not rpc._is_current_rpc_agent_set(): raise RuntimeError( 'RPC Framework needs to be initialized using' ' torch.distributed.rpc.init_rpc if init_rrefs is set to True') self._init_rpc() def __del__(self): # Clean up the global map. with _sharded_tensor_lock: global _sharded_tensor_current_id, _sharded_tensor_map if self._sharded_tensor_id in _sharded_tensor_map: _sharded_tensor_map.pop(self._sharded_tensor_id) # type: ignore[call-overload] def _init_rpc(self): # Validate PG and RPC ranks match. 
pg_rank = dist.get_rank() rpc_rank = rpc.get_worker_info().id if pg_rank != rpc_rank: raise ValueError( f'Default ProcessGroup and RPC ranks must be ' f'the same for ShardedTensor, found process group rank: ' f'{pg_rank} and RPC rank: {rpc_rank}' ) self._remote_shards = {} # Gather all the sharded tensor ids. worker_infos = rpc._get_current_rpc_agent().get_worker_infos() rank_to_name = {} name_to_rank = {} for worker_info in worker_infos: rank_to_name[worker_info.id] = worker_info.name name_to_rank[worker_info.name] = worker_info.id all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id) # Share the local shards to the entire world. futs = [] rpc_rank = rpc.get_worker_info().id for rank in range(dist.get_world_size()): # Skip self. if rank == dist.get_rank(): continue if len(self.local_shards()) != 0: rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()] fut = rpc.rpc_async( rank, _register_remote_shards, args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank)) futs.append(fut) torch.futures.wait_all(futs) # Barrier for all RPCs to finish on all ranks. rpc.api._all_gather(None) def gather( self, dst: int = 0, out: Optional[torch.Tensor] = None, ) -> None: """ Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the sharded tensor. The API needs to be called on all ranks in SPMD fashion. All ranks should have the same ``dst``. ``out`` should be a tensor of the same size as the overall size of the sharded tensor on ``dst`` and ``None`` on all other ranks. Args: dst(int): The rank where full tensor is constructed. Default: 0 out (:class `torch.Tensor`, optional): The output full tensor. Must to be provided ONLY on ``dst`` rank. Default: ``None`` """ rank = dist.get_rank(self._process_group) full_size = self.metadata().size _validate_output_tensor_for_gather(rank, dst, full_size, out) local_shards = self.local_shards() world_size = dist.get_world_size(self._process_group) gathered_shards = [None] * world_size # will revise this part with CPU support and use dist.gather() # once NCCL support for gather() is ready # https://github.com/pytorch/pytorch/issues/66187 dist.all_gather_object( obj=local_shards, object_list=gathered_shards, group=self._process_group, ) if rank == dst: dims = len(full_size) for shards in gathered_shards: if shards is None: raise RuntimeError( 'Gathered shards cannot be None on dst rank {dst}' ) for shard in shards: metadata = shard.metadata tensor = shard.tensor out_narrow_view = out for dim in range(dims): out_narrow_view = out_narrow_view.narrow( dim, metadata.shard_offsets[dim], metadata.shard_sizes[dim], ) out_narrow_view.copy_(tensor) @classmethod def _init_from_local_shards( cls, local_shards: List[Shard], *global_size, process_group=None, init_rrefs=False, ): # STEP 1: Validate the Shardmetadatas locally process_group = ( process_group if process_group is not None else distributed_c10d._get_default_group() ) current_rank = dist.get_rank(process_group) world_size = dist.get_world_size(process_group) local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None global_tensor_size = _flatten_tensor_size(global_size) if len(local_shards) > 0: local_sharded_tensor_metadata = \ build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group) # STEP 2. 
Validate metadata across ranks, and build a global sharded tensor # metadata by gathering local ShardedTensorMetadata gathered_metadatas: List[Optional[ShardedTensorMetadata]] = [] if world_size > 1: gathered_metadatas = [None for _ in range(world_size)] dist.all_gather_object( gathered_metadatas, local_sharded_tensor_metadata, group=process_group ) else: gathered_metadatas = [local_sharded_tensor_metadata] global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas) # STEP 3: Validation done, create the actual ShardedTensor and populate fields # prepare initialization sharded_tensor = cls.__new__(cls) sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs) # add to metadata and local_shards sharded_tensor._metadata = global_sharded_tensor_metadata sharded_tensor._local_shards = local_shards # make a EnumerableShardingSpec for sharded tensors that initialized from this API. # TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list. # see issue https://github.com/pytorch/pytorch/issues/67244 sharded_tensor._sharding_spec = EnumerableShardingSpec(global_sharded_tensor_metadata.shards_metadata) # run post initialization, i.e. map registration, rpc initialization sharded_tensor._post_init() return sharded_tensor @classmethod def _init_from_local_shards_and_global_metadata( cls, local_shards: List[Shard], sharded_tensor_metadata: ShardedTensorMetadata, process_group=None, init_rrefs=False, ) -> "ShardedTensor": """ Initialize a ShardedTensor with local shards and a global ShardedTensorMetadata built on each rank. Warning: This API is experimental and subject to change. It does not do cross rank validations, and fully rely on the user for the correctness of sharded_tensor_metadata on each rank """ process_group = ( process_group if process_group is not None else distributed_c10d._get_default_group() ) current_rank = dist.get_rank(process_group) shards_metadata = sharded_tensor_metadata.shards_metadata tensor_properties = sharded_tensor_metadata.tensor_properties if len(shards_metadata) == 0: raise ValueError("shards_metadata must not be empty!") if tensor_properties.layout != torch.strided: raise ValueError('Only torch.strided layout is currently supported') sharded_tensor = cls.__new__(cls) sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs) sharded_tensor._metadata = sharded_tensor_metadata local_shard_metadatas = [] def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False): tensor_property_or_metadata = "tensor property" if is_property else "local ShardMetadata" if expected != actual: raise ValueError(f"Local shards' tensor {prop_name} property is incompatible with " f"{tensor_property_or_metadata} on rank {rank}: " f"{tensor_property_or_metadata} {prop_name}={expected}, " f"local shard tensor {prop_name}={actual}.") # collect local shard metadatas from the global sharded_tensor_metadata for shard_metadata in shards_metadata: # type: ignore[attr-defined] rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_metadata.placement) if current_rank == rank: local_shard_metadatas.append(shard_metadata) if len(local_shards) != len(local_shard_metadatas): raise RuntimeError( f'Number of local shards ({len(local_shards)}) does not match number of local ' f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) ' f'on rank ({current_rank}) ' ) for shard in local_shards: shard_meta = shard.metadata local_shard_tensor = shard.tensor rank, 
local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_meta.placement) # validate if shard_meta in the metadatas collected from sharded_tensor_metadata assert shard_meta in local_shard_metadatas, \ "local shard metadata not in sharded_tensor_metadata!" _raise_if_mismatch(tensor_properties.layout, local_shard_tensor.layout, "layout", current_rank, True) if not local_shard_tensor.is_contiguous(): raise ValueError('Only torch.contiguous_format memory_format is currently supported') _raise_if_mismatch(shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank) _raise_if_mismatch(tensor_properties.pin_memory, local_shard_tensor.is_pinned(), "pin_memory", current_rank, True) _raise_if_mismatch(local_device, local_shard_tensor.device, "device", current_rank) _raise_if_mismatch(tensor_properties.dtype, local_shard_tensor.dtype, "dtype", current_rank, True) _raise_if_mismatch( tensor_properties.requires_grad, local_shard_tensor.requires_grad, "requires_grad", current_rank, True) # check if shards_metadata have overlap shards validate_non_overlapping_shards_metadata(shards_metadata) # check if the shards_metadata is compatible with overall size of the sharded tensor. check_tensor(shards_metadata, list(sharded_tensor_metadata.size)) # done validation, add local_shards sharded_tensor._local_shards = local_shards # make a EnumerableShardingSpec for sharded tensors that initialized from this API. # TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list. # see issue https://github.com/pytorch/pytorch/issues/67244 sharded_tensor._sharding_spec = EnumerableShardingSpec(shards_metadata) # run post initialization, i.e. map registration, rpc initialization sharded_tensor._post_init() return sharded_tensor def _init_chunked(self, dims, tensor_init_params: TensorInitParams, ): current_rank = dist.get_rank(self._process_group) sharding_dim = self._sharding_spec.dim # type: ignore[attr-defined] # Validate the sharding spec. if not isinstance(sharding_dim, int): raise ValueError( f"Sharding dim needs to be an integer, found: {sharding_dim}" ) if sharding_dim >= len(dims) or sharding_dim < -len(dims): raise ValueError(f"Invalid sharding dim: {sharding_dim}") dim_size = dims[sharding_dim] remote_devices = self._sharding_spec.placements # type: ignore[attr-defined] chunks = len(remote_devices) # split_size computed similar to 'torch.chunk' split_size = get_split_size(dim_size, chunks) shards_metadata = [] for idx, remote_device in enumerate(remote_devices): rank, local_device = _parse_and_validate_remote_device(self._process_group, remote_device) # Adjust the sharding dim for this rank. sharded_dim_size = get_chunked_dim_size(dim_size, split_size, idx) if sharded_dim_size > 0: # Build sharding_metadata. # deepcopy for modification. rank_dims = dims.copy() rank_offsets = [0] * len(dims) rank_offsets[sharding_dim] = split_size * idx rank_dims[sharding_dim] = sharded_dim_size shard_metadata = ShardMetadata(rank_offsets, rank_dims, remote_device) shards_metadata.append(shard_metadata) # Build the local shard for the current rank if it is involved in the sharding spec. if current_rank == rank: # Initialize the local shard. 
local_shard = _create_tensor_from_params( *rank_dims, local_device=local_device, tensor_init_params=tensor_init_params) self._local_shards.append(Shard(local_shard, shard_metadata)) # Build overall metadata self._metadata = ShardedTensorMetadata( shards_metadata, dims, tensor_init_params.tensor_properties, ) def _init_enumerable(self, dims, tensor_init_params: TensorInitParams): # Validate the sharding spec is compatible with the tensor. check_tensor(self._sharding_spec.shards, dims) # type: ignore[attr-defined] current_rank = dist.get_rank(self._process_group) shards_metadata = [] for shard_metadata in self._sharding_spec.shards: # type: ignore[attr-defined] rank, local_device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement) shards_metadata.append(shard_metadata) if current_rank == rank: # Initialize the local shard. local_shard = _create_tensor_from_params( *shard_metadata.shard_sizes, local_device=local_device, tensor_init_params=tensor_init_params) self._local_shards.append(Shard(local_shard, shard_metadata)) # Build overall metadata self._metadata = ShardedTensorMetadata( shards_metadata, dims, tensor_init_params.tensor_properties, ) def sharding_spec(self) -> ShardingSpec: """ Returns the ShardingSpec for the tensor. """ return self._sharding_spec def __torch_function__(self, func, types, args=(), kwargs=None): if func in _SHARDED_OPS: return _SHARDED_OPS[func](types, args, kwargs, self._process_group) raise RuntimeError( f"torch function '{func.__name__}', with args: {args} and " f"kwargs: {kwargs} not supported for ShardedTensor!") def metadata(self) -> ShardedTensorMetadata: """ Returns a :class:`ShardedTensorMetadata` object corresponding to the metadata for the entire tensor. """ return self._metadata def local_shards(self) -> List[Shard]: """ Returns a list of :class:`Shard' corresponding to the local shards for this rank. Returns an empty list if the current rank does not host any shards for this Tensor. """ return self._local_shards def size(self, dim: int = None) -> Union[torch.Size, int]: """ Returns a :Union:`[torch.Size, int]` which represents the size of the tensor. The dimension can be specified. Args: dim (int, optional): the dimension over which the size represents. If specified, it returns the size of the given dimension. If not, it returns a subclass of tuple. Default: ``None`` Returns: A :Union:`[torch.Size, int]` represents the size of the tensor. """ size = self._metadata.size if dim is None: return size if dim < 0 or dim >= len(size): raise ValueError( f"Argument ``dim`` must be within the range of tensor dimensions [0, {len(size)})" ) return size[dim] def is_pinned(self) -> bool: """ Returns True if the sharded tensor (each local shard) resides in pinned memory. """ return self._metadata.tensor_properties.pin_memory def is_contiguous(self) -> bool: """ Returns True if the sharded tensor (each local shard) is contiguous in memory in the order specified by memory format. 
""" return self._metadata.tensor_properties.memory_format == torch.contiguous_format @property def shape(self): return self._metadata.size @property def requires_grad(self): return self._metadata.tensor_properties.requires_grad @property def dtype(self): return self._metadata.tensor_properties.dtype @property def layout(self): return self._metadata.tensor_properties.layout def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int): self._remote_shards[rpc_rank] = remote_shards def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]: """ Returns a Dict[int, RRef] with keys being the RPC rank and values being RRefs to shards on that rank. Need to initialize the RPC framework for this functionality. Raises an exception if ShardedTensor was created with ``init_rrefs=False`` """ if not self._init_rrefs: raise RuntimeError( 'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available' ) return self._remote_shards def __hash__(self): return id(self) def __repr__(self): return f'ShardedTensor({self._metadata})' @dataclass class ProcessGroupState: """ State for ser-de of process group """ local_rank: int global_rank: int local_world_size: int global_world_size: int def __getstate__(self): pg_state = ShardedTensor.ProcessGroupState( distributed_c10d.get_rank(self._process_group), distributed_c10d.get_rank(), distributed_c10d.get_world_size(self._process_group), distributed_c10d.get_world_size(), ) return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs def __setstate__(self, state): self._sharded_tensor_id = None if not distributed_c10d.is_initialized(): raise RuntimeError( 'Need to initialize default process group using ' '"init_process_group" before loading ShardedTensor') self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state # Setup process group self._process_group = get_current_process_group() # Validate process group. local_rank = distributed_c10d.get_rank(self._process_group) if pg_state.local_rank != local_rank: raise RuntimeError( f'Local rank at save time was {pg_state.local_rank}, but at ' f'load time was {local_rank}') global_rank = distributed_c10d.get_rank() if pg_state.global_rank != global_rank: raise RuntimeError( f'Global rank at save time was {pg_state.global_rank}, but at ' f'load time was {global_rank}') local_world_size = distributed_c10d.get_world_size(self._process_group) if pg_state.local_world_size != local_world_size: raise RuntimeError( f'Local world size at save time was {pg_state.local_world_size}, ' f'but at load time was {local_world_size}') global_world_size = distributed_c10d.get_world_size() if pg_state.global_world_size != global_world_size: raise RuntimeError( f'Global world size at save time was {pg_state.global_world_size}, ' f'but at load time was {global_world_size}') self._post_init() def _create_tensor_from_params(*size, local_device, tensor_init_params: TensorInitParams): """ Helper to construct tensor from size, device and common params. 
""" create_op = tensor_init_params.create_op dtype = tensor_init_params.tensor_properties.dtype layout = tensor_init_params.tensor_properties.layout requires_grad = tensor_init_params.tensor_properties.requires_grad memory_format = tensor_init_params.tensor_properties.memory_format pin_memory = tensor_init_params.tensor_properties.pin_memory if create_op == CreateOp.ONES: return torch.ones(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad,) elif create_op == CreateOp.EMPTY: return torch.empty(*size, dtype=dtype, layout=layout, device=local_device, requires_grad=requires_grad, # NB: memory_format param is not accepted by torch.ones memory_format=memory_format, pin_memory=pin_memory,) elif tensor_init_params.create_op == CreateOp.ZEROS: return torch.zeros(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad,) elif tensor_init_params.create_op == CreateOp.RAND: return torch.rand(*size, dtype=dtype, layout=layout, device=local_device, pin_memory=pin_memory, requires_grad=requires_grad,) elif tensor_init_params.create_op == CreateOp.FULL: return torch.full(size=size, fill_value=tensor_init_params.fill_value, layout=layout, dtype=dtype, requires_grad=requires_grad, device=local_device, ) else: raise ValueError(f'Unsupported create_op: {tensor_init_params.create_op}')
prerelease_local_scheme
Return the local scheme version unless building on master in CircleCI. This function returns the local scheme version number (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a pre-release, in which case it ignores the hash and produces a PEP 440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
import os from setuptools import setup, find_packages with open('README.rst') as readme_file: readme = readme_file.read() # MASKED: prerelease_local_scheme function (lines 8-22) setup( name='histomicsui', use_scm_version={'local_scheme': prerelease_local_scheme}, setup_requires=['setuptools-scm'], description='Organize, visualize, and analyze histology images.', author='Kitware, Inc.', author_email='kitware@kitware.com', classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], install_requires=[ 'girder-large-image-annotation>=1.4.2', 'girder-slicer-cli-web[girder]>=1.2.0', 'girder-worker[girder]>=0.6.0', 'celery>=4.4.0rc5', ], license='Apache Software License 2.0', long_description=readme, long_description_content_type='text/x-rst', include_package_data=True, keywords='girder-plugin, histomicsui', packages=find_packages(exclude=['test', 'test.*']), url='https://github.com/DigitalSlideArchive/histomicsui', zip_safe=False, python_requires='>=3.6', entry_points={ 'girder.plugin': [ 'histomicsui = histomicsui:GirderPlugin' ] }, )
def prerelease_local_scheme(version): """ Return local scheme version unless building on master in CircleCI. This function returns the local scheme version number (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a pre-release in which case it ignores the hash and produces a PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>). """ from setuptools_scm.version import get_local_node_and_date if os.getenv('CIRCLE_BRANCH') in ('master', ): return '' else: return get_local_node_and_date(version)
8
22
import os from setuptools import setup, find_packages with open('README.rst') as readme_file: readme = readme_file.read() def prerelease_local_scheme(version): """ Return local scheme version unless building on master in CircleCI. This function returns the local scheme version number (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a pre-release in which case it ignores the hash and produces a PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>). """ from setuptools_scm.version import get_local_node_and_date if os.getenv('CIRCLE_BRANCH') in ('master', ): return '' else: return get_local_node_and_date(version) setup( name='histomicsui', use_scm_version={'local_scheme': prerelease_local_scheme}, setup_requires=['setuptools-scm'], description='Organize, visualize, and analyze histology images.', author='Kitware, Inc.', author_email='kitware@kitware.com', classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], install_requires=[ 'girder-large-image-annotation>=1.4.2', 'girder-slicer-cli-web[girder]>=1.2.0', 'girder-worker[girder]>=0.6.0', 'celery>=4.4.0rc5', ], license='Apache Software License 2.0', long_description=readme, long_description_content_type='text/x-rst', include_package_data=True, keywords='girder-plugin, histomicsui', packages=find_packages(exclude=['test', 'test.*']), url='https://github.com/DigitalSlideArchive/histomicsui', zip_safe=False, python_requires='>=3.6', entry_points={ 'girder.plugin': [ 'histomicsui = histomicsui:GirderPlugin' ] }, )
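To make the branching in `prerelease_local_scheme` concrete, the sketch below stubs out `get_local_node_and_date` so it runs without a git checkout; the stub's return value is invented for illustration, while the branch logic mirrors the function above:

import os

def get_local_node_and_date(version):
    # Stand-in for setuptools_scm.version.get_local_node_and_date;
    # the returned string is a made-up example of a local version segment.
    return '+g1234abc.d20240101'

def prerelease_local_scheme(version):
    if os.getenv('CIRCLE_BRANCH') in ('master',):
        return ''  # pre-release build on CI: drop the local version segment
    return get_local_node_and_date(version)

os.environ.pop('CIRCLE_BRANCH', None)
print(repr(prerelease_local_scheme(None)))  # '+g1234abc.d20240101' -> 0.0.0.dev<N>+g<HASH>
os.environ['CIRCLE_BRANCH'] = 'master'
print(repr(prerelease_local_scheme(None)))  # '' -> PEP 440 pre-release, 0.0.0.dev<N>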
to_proto
Converts an on demand feature view object to its protobuf representation. Returns: An OnDemandFeatureViewProto protobuf.
import copy import functools import warnings from types import MethodType from typing import Dict, List, Optional, Type, Union import dill import pandas as pd from feast.base_feature_view import BaseFeatureView from feast.data_source import RequestSource from feast.errors import RegistryInferenceFailure, SpecifiedFeaturesNotPresentError from feast.feature import Feature from feast.feature_view import FeatureView from feast.feature_view_projection import FeatureViewProjection from feast.field import Field, from_value_type from feast.protos.feast.core.OnDemandFeatureView_pb2 import ( OnDemandFeatureView as OnDemandFeatureViewProto, ) from feast.protos.feast.core.OnDemandFeatureView_pb2 import ( OnDemandFeatureViewMeta, OnDemandFeatureViewSpec, OnDemandSource, ) from feast.protos.feast.core.OnDemandFeatureView_pb2 import ( UserDefinedFunction as UserDefinedFunctionProto, ) from feast.type_map import ( feast_value_type_to_pandas_type, python_type_to_feast_value_type, ) from feast.usage import log_exceptions from feast.value_type import ValueType warnings.simplefilter("once", DeprecationWarning) class OnDemandFeatureView(BaseFeatureView): """ [Experimental] An OnDemandFeatureView defines a logical group of features that are generated by applying a transformation on a set of input sources, such as feature views and request data sources. Attributes: name: The unique name of the on demand feature view. features: The list of features in the output of the on demand feature view. source_feature_view_projections: A map from input source names to actual input sources with type FeatureViewProjection. source_request_sources: A map from input source names to the actual input sources with type RequestSource. udf: The user defined transformation function, which must take pandas dataframes as inputs. description: A human-readable description. tags: A dictionary of key-value pairs to store arbitrary metadata. owner: The owner of the on demand feature view, typically the email of the primary maintainer. """ # TODO(adchia): remove inputs from proto and declaration name: str features: List[Field] source_feature_view_projections: Dict[str, FeatureViewProjection] source_request_sources: Dict[str, RequestSource] udf: MethodType description: str tags: Dict[str, str] owner: str @log_exceptions def __init__( self, *args, name: Optional[str] = None, features: Optional[List[Feature]] = None, sources: Optional[ Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]] ] = None, udf: Optional[MethodType] = None, inputs: Optional[ Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]] ] = None, schema: Optional[List[Field]] = None, description: str = "", tags: Optional[Dict[str, str]] = None, owner: str = "", ): """ Creates an OnDemandFeatureView object. Args: name: The unique name of the on demand feature view. features (deprecated): The list of features in the output of the on demand feature view, after the transformation has been applied. sources (optional): A map from input source names to the actual input sources, which may be feature views, feature view projections, or request data sources. These sources serve as inputs to the udf, which will refer to them by name. udf (optional): The user defined transformation function, which must take pandas dataframes as inputs. inputs (optional): A map from input source names to the actual input sources, which may be feature views, feature view projections, or request data sources. These sources serve as inputs to the udf, which will refer to them by name. 
schema (optional): The list of features in the output of the on demand feature view, after the transformation has been applied. description (optional): A human-readable description. tags (optional): A dictionary of key-value pairs to store arbitrary metadata. owner (optional): The owner of the on demand feature view, typically the email of the primary maintainer. """ positional_attributes = ["name", "features", "inputs", "udf"] _name = name _schema = schema or [] if len(_schema) == 0 and features is not None: _schema = [Field.from_feature(feature) for feature in features] if features is not None: warnings.warn( ( "The `features` parameter is being deprecated in favor of the `schema` parameter. " "Please switch from using `features` to `schema`. This will also requiring switching " "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not " "support the `features` parameter." ), DeprecationWarning, ) _sources = sources or inputs if inputs and sources: raise ValueError("At most one of `sources` or `inputs` can be specified.") elif inputs: warnings.warn( ( "The `inputs` parameter is being deprecated. Please use `sources` instead. " "Feast 0.21 and onwards will not support the `inputs` parameter." ), DeprecationWarning, ) _udf = udf if args: warnings.warn( ( "On demand feature view parameters should be specified as keyword arguments " "instead of positional arguments. Feast 0.23 and onwards will not support " "positional arguments in on demand feature view definitions." ), DeprecationWarning, ) if len(args) > len(positional_attributes): raise ValueError( f"Only {', '.join(positional_attributes)} are allowed as positional args " f"when defining feature views, for backwards compatibility." ) if len(args) >= 1: _name = args[0] if len(args) >= 2: _schema = args[1] # Convert Features to Fields. if len(_schema) > 0 and isinstance(_schema[0], Feature): _schema = [Field.from_feature(feature) for feature in _schema] warnings.warn( ( "The `features` parameter is being deprecated in favor of the `schema` parameter. " "Please switch from using `features` to `schema`. This will also requiring switching " "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not " "support the `features` parameter." ), DeprecationWarning, ) if len(args) >= 3: _sources = args[2] warnings.warn( ( "The `inputs` parameter is being deprecated. Please use `sources` instead. " "Feast 0.21 and onwards will not support the `inputs` parameter." ), DeprecationWarning, ) if len(args) >= 4: _udf = args[3] if not _name: raise ValueError( "The name of the on demand feature view must be specified." 
) if not _sources: raise ValueError("The `sources` parameter must be specified.") super().__init__( name=_name, features=_schema, description=description, tags=tags, owner=owner, ) assert _sources is not None self.source_feature_view_projections: Dict[str, FeatureViewProjection] = {} self.source_request_sources: Dict[str, RequestSource] = {} for source_name, odfv_source in _sources.items(): if isinstance(odfv_source, RequestSource): self.source_request_sources[source_name] = odfv_source elif isinstance(odfv_source, FeatureViewProjection): self.source_feature_view_projections[source_name] = odfv_source else: self.source_feature_view_projections[ source_name ] = odfv_source.projection if _udf is None: raise ValueError("The `udf` parameter must be specified.") assert _udf self.udf = _udf @property def proto_class(self) -> Type[OnDemandFeatureViewProto]: return OnDemandFeatureViewProto def __copy__(self): fv = OnDemandFeatureView( name=self.name, schema=self.features, sources=dict( **self.source_feature_view_projections, **self.source_request_sources, ), udf=self.udf, description=self.description, tags=self.tags, owner=self.owner, ) fv.projection = copy.copy(self.projection) return fv def __eq__(self, other): if not super().__eq__(other): return False if ( not self.source_feature_view_projections == other.source_feature_view_projections or not self.source_request_sources == other.source_request_sources or not self.udf.__code__.co_code == other.udf.__code__.co_code ): return False return True def __hash__(self): return super().__hash__() # MASKED: to_proto function (lines 253-287) @classmethod def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto): """ Creates an on demand feature view from a protobuf representation. Args: on_demand_feature_view_proto: A protobuf representation of an on-demand feature view. Returns: A OnDemandFeatureView object based on the on-demand feature view protobuf. """ sources = {} for ( source_name, on_demand_source, ) in on_demand_feature_view_proto.spec.sources.items(): if on_demand_source.WhichOneof("source") == "feature_view": sources[source_name] = FeatureView.from_proto( on_demand_source.feature_view ).projection elif on_demand_source.WhichOneof("source") == "feature_view_projection": sources[source_name] = FeatureViewProjection.from_proto( on_demand_source.feature_view_projection ) else: sources[source_name] = RequestSource.from_proto( on_demand_source.request_data_source ) on_demand_feature_view_obj = cls( name=on_demand_feature_view_proto.spec.name, schema=[ Field( name=feature.name, dtype=from_value_type(ValueType(feature.value_type)), ) for feature in on_demand_feature_view_proto.spec.features ], sources=sources, udf=dill.loads( on_demand_feature_view_proto.spec.user_defined_function.body ), description=on_demand_feature_view_proto.spec.description, tags=dict(on_demand_feature_view_proto.spec.tags), owner=on_demand_feature_view_proto.spec.owner, ) # FeatureViewProjections are not saved in the OnDemandFeatureView proto. # Create the default projection. 
on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition( on_demand_feature_view_obj ) if on_demand_feature_view_proto.meta.HasField("created_timestamp"): on_demand_feature_view_obj.created_timestamp = ( on_demand_feature_view_proto.meta.created_timestamp.ToDatetime() ) if on_demand_feature_view_proto.meta.HasField("last_updated_timestamp"): on_demand_feature_view_obj.last_updated_timestamp = ( on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime() ) return on_demand_feature_view_obj def get_request_data_schema(self) -> Dict[str, ValueType]: schema: Dict[str, ValueType] = {} for request_source in self.source_request_sources.values(): if isinstance(request_source.schema, List): new_schema = {} for field in request_source.schema: new_schema[field.name] = field.dtype.to_value_type() schema.update(new_schema) elif isinstance(request_source.schema, Dict): schema.update(request_source.schema) else: raise Exception( f"Request source schema is not correct type: ${str(type(request_source.schema))}" ) return schema def get_transformed_features_df( self, df_with_features: pd.DataFrame, full_feature_names: bool = False, ) -> pd.DataFrame: # Apply on demand transformations columns_to_cleanup = [] for source_fv_projection in self.source_feature_view_projections.values(): for feature in source_fv_projection.features: full_feature_ref = f"{source_fv_projection.name}__{feature.name}" if full_feature_ref in df_with_features.keys(): # Make sure the partial feature name is always present df_with_features[feature.name] = df_with_features[full_feature_ref] columns_to_cleanup.append(feature.name) elif feature.name in df_with_features.keys(): # Make sure the full feature name is always present df_with_features[full_feature_ref] = df_with_features[feature.name] columns_to_cleanup.append(full_feature_ref) # Compute transformed values and apply to each result row df_with_transformed_features = self.udf.__call__(df_with_features) # Work out whether the correct columns names are used. rename_columns: Dict[str, str] = {} for feature in self.features: short_name = feature.name long_name = f"{self.projection.name_to_use()}__{feature.name}" if ( short_name in df_with_transformed_features.columns and full_feature_names ): rename_columns[short_name] = long_name elif not full_feature_names: # Long name must be in dataframe. rename_columns[long_name] = short_name # Cleanup extra columns used for transformation df_with_features.drop(columns=columns_to_cleanup, inplace=True) return df_with_transformed_features.rename(columns=rename_columns) def infer_features(self): """ Infers the set of features associated to this feature view from the input source. Raises: RegistryInferenceFailure: The set of features could not be inferred. 
""" df = pd.DataFrame() for feature_view_projection in self.source_feature_view_projections.values(): for feature in feature_view_projection.features: dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type()) df[f"{feature_view_projection.name}__{feature.name}"] = pd.Series( dtype=dtype ) df[f"{feature.name}"] = pd.Series(dtype=dtype) for request_data in self.source_request_sources.values(): for field in request_data.schema: dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type()) df[f"{field.name}"] = pd.Series(dtype=dtype) output_df: pd.DataFrame = self.udf.__call__(df) inferred_features = [] for f, dt in zip(output_df.columns, output_df.dtypes): inferred_features.append( Field( name=f, dtype=from_value_type( python_type_to_feast_value_type(f, type_name=str(dt)) ), ) ) if self.features: missing_features = [] for specified_features in self.features: if specified_features not in inferred_features: missing_features.append(specified_features) if missing_features: raise SpecifiedFeaturesNotPresentError( [f.name for f in missing_features], self.name ) else: self.features = inferred_features if not self.features: raise RegistryInferenceFailure( "OnDemandFeatureView", f"Could not infer Features for the feature view '{self.name}'.", ) @staticmethod def get_requested_odfvs(feature_refs, project, registry): all_on_demand_feature_views = registry.list_on_demand_feature_views( project, allow_cache=True ) requested_on_demand_feature_views: List[OnDemandFeatureView] = [] for odfv in all_on_demand_feature_views: for feature in odfv.features: if f"{odfv.name}:{feature.name}" in feature_refs: requested_on_demand_feature_views.append(odfv) break return requested_on_demand_feature_views # TODO(felixwang9817): Force this decorator to accept kwargs and switch from # `features` to `schema`. def on_demand_feature_view( *args, features: Optional[List[Feature]] = None, sources: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None, inputs: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None, schema: Optional[List[Field]] = None, description: str = "", tags: Optional[Dict[str, str]] = None, owner: str = "", ): """ Creates an OnDemandFeatureView object with the given user function as udf. Args: features (deprecated): The list of features in the output of the on demand feature view, after the transformation has been applied. sources (optional): A map from input source names to the actual input sources, which may be feature views, feature view projections, or request data sources. These sources serve as inputs to the udf, which will refer to them by name. inputs (optional): A map from input source names to the actual input sources, which may be feature views, feature view projections, or request data sources. These sources serve as inputs to the udf, which will refer to them by name. schema (optional): The list of features in the output of the on demand feature view, after the transformation has been applied. description (optional): A human-readable description. tags (optional): A dictionary of key-value pairs to store arbitrary metadata. owner (optional): The owner of the on demand feature view, typically the email of the primary maintainer. """ positional_attributes = ["features", "inputs"] _schema = schema or [] if len(_schema) == 0 and features is not None: _schema = [Field.from_feature(feature) for feature in features] if features is not None: warnings.warn( ( "The `features` parameter is being deprecated in favor of the `schema` parameter. 
" "Please switch from using `features` to `schema`. This will also requiring switching " "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not " "support the `features` parameter." ), DeprecationWarning, ) _sources = sources or inputs if inputs and sources: raise ValueError("At most one of `sources` or `inputs` can be specified.") elif inputs: warnings.warn( ( "The `inputs` parameter is being deprecated. Please use `sources` instead. " "Feast 0.21 and onwards will not support the `inputs` parameter." ), DeprecationWarning, ) if args: warnings.warn( ( "On demand feature view parameters should be specified as keyword arguments " "instead of positional arguments. Feast 0.23 and onwards will not support " "positional arguments in on demand feature view definitions." ), DeprecationWarning, ) if len(args) > len(positional_attributes): raise ValueError( f"Only {', '.join(positional_attributes)} are allowed as positional args " f"when defining feature views, for backwards compatibility." ) if len(args) >= 1: _schema = args[0] # Convert Features to Fields. if len(_schema) > 0 and isinstance(_schema[0], Feature): _schema = [Field.from_feature(feature) for feature in _schema] warnings.warn( ( "The `features` parameter is being deprecated in favor of the `schema` parameter. " "Please switch from using `features` to `schema`. This will also requiring switching " "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not " "support the `features` parameter." ), DeprecationWarning, ) if len(args) >= 2: _sources = args[1] warnings.warn( ( "The `inputs` parameter is being deprecated. Please use `sources` instead. " "Feast 0.21 and onwards will not support the `inputs` parameter." ), DeprecationWarning, ) if not _sources: raise ValueError("The `sources` parameter must be specified.") def decorator(user_function): on_demand_feature_view_obj = OnDemandFeatureView( name=user_function.__name__, sources=_sources, schema=_schema, udf=user_function, description=description, tags=tags, owner=owner, ) functools.update_wrapper( wrapper=on_demand_feature_view_obj, wrapped=user_function ) return on_demand_feature_view_obj return decorator
def to_proto(self) -> OnDemandFeatureViewProto: """ Converts an on demand feature view object to its protobuf representation. Returns: A OnDemandFeatureViewProto protobuf. """ meta = OnDemandFeatureViewMeta() if self.created_timestamp: meta.created_timestamp.FromDatetime(self.created_timestamp) if self.last_updated_timestamp: meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp) sources = {} for source_name, fv_projection in self.source_feature_view_projections.items(): sources[source_name] = OnDemandSource( feature_view_projection=fv_projection.to_proto() ) for (source_name, request_sources,) in self.source_request_sources.items(): sources[source_name] = OnDemandSource( request_data_source=request_sources.to_proto() ) spec = OnDemandFeatureViewSpec( name=self.name, features=[feature.to_proto() for feature in self.features], sources=sources, user_defined_function=UserDefinedFunctionProto( name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True), ), description=self.description, tags=self.tags, owner=self.owner, ) return OnDemandFeatureViewProto(spec=spec, meta=meta)
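Because `from_proto` (shown elsewhere in this file) rebuilds the view from the same message, serialization is designed to round-trip. A minimal sketch, assuming `odfv` is an already-constructed `OnDemandFeatureView`:

proto = odfv.to_proto()
restored = OnDemandFeatureView.from_proto(proto)
# __eq__ compares the sources and the udf's compiled bytecode, among other fields.
assert restored == odfv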
253
287
import copy import functools import warnings from types import MethodType from typing import Dict, List, Optional, Type, Union import dill import pandas as pd from feast.base_feature_view import BaseFeatureView from feast.data_source import RequestSource from feast.errors import RegistryInferenceFailure, SpecifiedFeaturesNotPresentError from feast.feature import Feature from feast.feature_view import FeatureView from feast.feature_view_projection import FeatureViewProjection from feast.field import Field, from_value_type from feast.protos.feast.core.OnDemandFeatureView_pb2 import ( OnDemandFeatureView as OnDemandFeatureViewProto, ) from feast.protos.feast.core.OnDemandFeatureView_pb2 import ( OnDemandFeatureViewMeta, OnDemandFeatureViewSpec, OnDemandSource, ) from feast.protos.feast.core.OnDemandFeatureView_pb2 import ( UserDefinedFunction as UserDefinedFunctionProto, ) from feast.type_map import ( feast_value_type_to_pandas_type, python_type_to_feast_value_type, ) from feast.usage import log_exceptions from feast.value_type import ValueType warnings.simplefilter("once", DeprecationWarning) class OnDemandFeatureView(BaseFeatureView): """ [Experimental] An OnDemandFeatureView defines a logical group of features that are generated by applying a transformation on a set of input sources, such as feature views and request data sources. Attributes: name: The unique name of the on demand feature view. features: The list of features in the output of the on demand feature view. source_feature_view_projections: A map from input source names to actual input sources with type FeatureViewProjection. source_request_sources: A map from input source names to the actual input sources with type RequestSource. udf: The user defined transformation function, which must take pandas dataframes as inputs. description: A human-readable description. tags: A dictionary of key-value pairs to store arbitrary metadata. owner: The owner of the on demand feature view, typically the email of the primary maintainer. """ # TODO(adchia): remove inputs from proto and declaration name: str features: List[Field] source_feature_view_projections: Dict[str, FeatureViewProjection] source_request_sources: Dict[str, RequestSource] udf: MethodType description: str tags: Dict[str, str] owner: str @log_exceptions def __init__( self, *args, name: Optional[str] = None, features: Optional[List[Feature]] = None, sources: Optional[ Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]] ] = None, udf: Optional[MethodType] = None, inputs: Optional[ Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]] ] = None, schema: Optional[List[Field]] = None, description: str = "", tags: Optional[Dict[str, str]] = None, owner: str = "", ): """ Creates an OnDemandFeatureView object. Args: name: The unique name of the on demand feature view. features (deprecated): The list of features in the output of the on demand feature view, after the transformation has been applied. sources (optional): A map from input source names to the actual input sources, which may be feature views, feature view projections, or request data sources. These sources serve as inputs to the udf, which will refer to them by name. udf (optional): The user defined transformation function, which must take pandas dataframes as inputs. inputs (optional): A map from input source names to the actual input sources, which may be feature views, feature view projections, or request data sources. These sources serve as inputs to the udf, which will refer to them by name. 
schema (optional): The list of features in the output of the on demand feature view, after the transformation has been applied. description (optional): A human-readable description. tags (optional): A dictionary of key-value pairs to store arbitrary metadata. owner (optional): The owner of the on demand feature view, typically the email of the primary maintainer. """ positional_attributes = ["name", "features", "inputs", "udf"] _name = name _schema = schema or [] if len(_schema) == 0 and features is not None: _schema = [Field.from_feature(feature) for feature in features] if features is not None: warnings.warn( ( "The `features` parameter is being deprecated in favor of the `schema` parameter. " "Please switch from using `features` to `schema`. This will also requiring switching " "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not " "support the `features` parameter." ), DeprecationWarning, ) _sources = sources or inputs if inputs and sources: raise ValueError("At most one of `sources` or `inputs` can be specified.") elif inputs: warnings.warn( ( "The `inputs` parameter is being deprecated. Please use `sources` instead. " "Feast 0.21 and onwards will not support the `inputs` parameter." ), DeprecationWarning, ) _udf = udf if args: warnings.warn( ( "On demand feature view parameters should be specified as keyword arguments " "instead of positional arguments. Feast 0.23 and onwards will not support " "positional arguments in on demand feature view definitions." ), DeprecationWarning, ) if len(args) > len(positional_attributes): raise ValueError( f"Only {', '.join(positional_attributes)} are allowed as positional args " f"when defining feature views, for backwards compatibility." ) if len(args) >= 1: _name = args[0] if len(args) >= 2: _schema = args[1] # Convert Features to Fields. if len(_schema) > 0 and isinstance(_schema[0], Feature): _schema = [Field.from_feature(feature) for feature in _schema] warnings.warn( ( "The `features` parameter is being deprecated in favor of the `schema` parameter. " "Please switch from using `features` to `schema`. This will also requiring switching " "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not " "support the `features` parameter." ), DeprecationWarning, ) if len(args) >= 3: _sources = args[2] warnings.warn( ( "The `inputs` parameter is being deprecated. Please use `sources` instead. " "Feast 0.21 and onwards will not support the `inputs` parameter." ), DeprecationWarning, ) if len(args) >= 4: _udf = args[3] if not _name: raise ValueError( "The name of the on demand feature view must be specified." 
) if not _sources: raise ValueError("The `sources` parameter must be specified.") super().__init__( name=_name, features=_schema, description=description, tags=tags, owner=owner, ) assert _sources is not None self.source_feature_view_projections: Dict[str, FeatureViewProjection] = {} self.source_request_sources: Dict[str, RequestSource] = {} for source_name, odfv_source in _sources.items(): if isinstance(odfv_source, RequestSource): self.source_request_sources[source_name] = odfv_source elif isinstance(odfv_source, FeatureViewProjection): self.source_feature_view_projections[source_name] = odfv_source else: self.source_feature_view_projections[ source_name ] = odfv_source.projection if _udf is None: raise ValueError("The `udf` parameter must be specified.") assert _udf self.udf = _udf @property def proto_class(self) -> Type[OnDemandFeatureViewProto]: return OnDemandFeatureViewProto def __copy__(self): fv = OnDemandFeatureView( name=self.name, schema=self.features, sources=dict( **self.source_feature_view_projections, **self.source_request_sources, ), udf=self.udf, description=self.description, tags=self.tags, owner=self.owner, ) fv.projection = copy.copy(self.projection) return fv def __eq__(self, other): if not super().__eq__(other): return False if ( not self.source_feature_view_projections == other.source_feature_view_projections or not self.source_request_sources == other.source_request_sources or not self.udf.__code__.co_code == other.udf.__code__.co_code ): return False return True def __hash__(self): return super().__hash__() def to_proto(self) -> OnDemandFeatureViewProto: """ Converts an on demand feature view object to its protobuf representation. Returns: A OnDemandFeatureViewProto protobuf. """ meta = OnDemandFeatureViewMeta() if self.created_timestamp: meta.created_timestamp.FromDatetime(self.created_timestamp) if self.last_updated_timestamp: meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp) sources = {} for source_name, fv_projection in self.source_feature_view_projections.items(): sources[source_name] = OnDemandSource( feature_view_projection=fv_projection.to_proto() ) for (source_name, request_sources,) in self.source_request_sources.items(): sources[source_name] = OnDemandSource( request_data_source=request_sources.to_proto() ) spec = OnDemandFeatureViewSpec( name=self.name, features=[feature.to_proto() for feature in self.features], sources=sources, user_defined_function=UserDefinedFunctionProto( name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True), ), description=self.description, tags=self.tags, owner=self.owner, ) return OnDemandFeatureViewProto(spec=spec, meta=meta) @classmethod def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto): """ Creates an on demand feature view from a protobuf representation. Args: on_demand_feature_view_proto: A protobuf representation of an on-demand feature view. Returns: A OnDemandFeatureView object based on the on-demand feature view protobuf. 
""" sources = {} for ( source_name, on_demand_source, ) in on_demand_feature_view_proto.spec.sources.items(): if on_demand_source.WhichOneof("source") == "feature_view": sources[source_name] = FeatureView.from_proto( on_demand_source.feature_view ).projection elif on_demand_source.WhichOneof("source") == "feature_view_projection": sources[source_name] = FeatureViewProjection.from_proto( on_demand_source.feature_view_projection ) else: sources[source_name] = RequestSource.from_proto( on_demand_source.request_data_source ) on_demand_feature_view_obj = cls( name=on_demand_feature_view_proto.spec.name, schema=[ Field( name=feature.name, dtype=from_value_type(ValueType(feature.value_type)), ) for feature in on_demand_feature_view_proto.spec.features ], sources=sources, udf=dill.loads( on_demand_feature_view_proto.spec.user_defined_function.body ), description=on_demand_feature_view_proto.spec.description, tags=dict(on_demand_feature_view_proto.spec.tags), owner=on_demand_feature_view_proto.spec.owner, ) # FeatureViewProjections are not saved in the OnDemandFeatureView proto. # Create the default projection. on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition( on_demand_feature_view_obj ) if on_demand_feature_view_proto.meta.HasField("created_timestamp"): on_demand_feature_view_obj.created_timestamp = ( on_demand_feature_view_proto.meta.created_timestamp.ToDatetime() ) if on_demand_feature_view_proto.meta.HasField("last_updated_timestamp"): on_demand_feature_view_obj.last_updated_timestamp = ( on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime() ) return on_demand_feature_view_obj def get_request_data_schema(self) -> Dict[str, ValueType]: schema: Dict[str, ValueType] = {} for request_source in self.source_request_sources.values(): if isinstance(request_source.schema, List): new_schema = {} for field in request_source.schema: new_schema[field.name] = field.dtype.to_value_type() schema.update(new_schema) elif isinstance(request_source.schema, Dict): schema.update(request_source.schema) else: raise Exception( f"Request source schema is not correct type: ${str(type(request_source.schema))}" ) return schema def get_transformed_features_df( self, df_with_features: pd.DataFrame, full_feature_names: bool = False, ) -> pd.DataFrame: # Apply on demand transformations columns_to_cleanup = [] for source_fv_projection in self.source_feature_view_projections.values(): for feature in source_fv_projection.features: full_feature_ref = f"{source_fv_projection.name}__{feature.name}" if full_feature_ref in df_with_features.keys(): # Make sure the partial feature name is always present df_with_features[feature.name] = df_with_features[full_feature_ref] columns_to_cleanup.append(feature.name) elif feature.name in df_with_features.keys(): # Make sure the full feature name is always present df_with_features[full_feature_ref] = df_with_features[feature.name] columns_to_cleanup.append(full_feature_ref) # Compute transformed values and apply to each result row df_with_transformed_features = self.udf.__call__(df_with_features) # Work out whether the correct columns names are used. rename_columns: Dict[str, str] = {} for feature in self.features: short_name = feature.name long_name = f"{self.projection.name_to_use()}__{feature.name}" if ( short_name in df_with_transformed_features.columns and full_feature_names ): rename_columns[short_name] = long_name elif not full_feature_names: # Long name must be in dataframe. 
rename_columns[long_name] = short_name # Cleanup extra columns used for transformation df_with_features.drop(columns=columns_to_cleanup, inplace=True) return df_with_transformed_features.rename(columns=rename_columns) def infer_features(self): """ Infers the set of features associated to this feature view from the input source. Raises: RegistryInferenceFailure: The set of features could not be inferred. """ df = pd.DataFrame() for feature_view_projection in self.source_feature_view_projections.values(): for feature in feature_view_projection.features: dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type()) df[f"{feature_view_projection.name}__{feature.name}"] = pd.Series( dtype=dtype ) df[f"{feature.name}"] = pd.Series(dtype=dtype) for request_data in self.source_request_sources.values(): for field in request_data.schema: dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type()) df[f"{field.name}"] = pd.Series(dtype=dtype) output_df: pd.DataFrame = self.udf.__call__(df) inferred_features = [] for f, dt in zip(output_df.columns, output_df.dtypes): inferred_features.append( Field( name=f, dtype=from_value_type( python_type_to_feast_value_type(f, type_name=str(dt)) ), ) ) if self.features: missing_features = [] for specified_features in self.features: if specified_features not in inferred_features: missing_features.append(specified_features) if missing_features: raise SpecifiedFeaturesNotPresentError( [f.name for f in missing_features], self.name ) else: self.features = inferred_features if not self.features: raise RegistryInferenceFailure( "OnDemandFeatureView", f"Could not infer Features for the feature view '{self.name}'.", ) @staticmethod def get_requested_odfvs(feature_refs, project, registry): all_on_demand_feature_views = registry.list_on_demand_feature_views( project, allow_cache=True ) requested_on_demand_feature_views: List[OnDemandFeatureView] = [] for odfv in all_on_demand_feature_views: for feature in odfv.features: if f"{odfv.name}:{feature.name}" in feature_refs: requested_on_demand_feature_views.append(odfv) break return requested_on_demand_feature_views # TODO(felixwang9817): Force this decorator to accept kwargs and switch from # `features` to `schema`. def on_demand_feature_view( *args, features: Optional[List[Feature]] = None, sources: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None, inputs: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None, schema: Optional[List[Field]] = None, description: str = "", tags: Optional[Dict[str, str]] = None, owner: str = "", ): """ Creates an OnDemandFeatureView object with the given user function as udf. Args: features (deprecated): The list of features in the output of the on demand feature view, after the transformation has been applied. sources (optional): A map from input source names to the actual input sources, which may be feature views, feature view projections, or request data sources. These sources serve as inputs to the udf, which will refer to them by name. inputs (optional): A map from input source names to the actual input sources, which may be feature views, feature view projections, or request data sources. These sources serve as inputs to the udf, which will refer to them by name. schema (optional): The list of features in the output of the on demand feature view, after the transformation has been applied. description (optional): A human-readable description. tags (optional): A dictionary of key-value pairs to store arbitrary metadata. 
owner (optional): The owner of the on demand feature view, typically the email of the primary maintainer. """ positional_attributes = ["features", "inputs"] _schema = schema or [] if len(_schema) == 0 and features is not None: _schema = [Field.from_feature(feature) for feature in features] if features is not None: warnings.warn( ( "The `features` parameter is being deprecated in favor of the `schema` parameter. " "Please switch from using `features` to `schema`. This will also requiring switching " "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not " "support the `features` parameter." ), DeprecationWarning, ) _sources = sources or inputs if inputs and sources: raise ValueError("At most one of `sources` or `inputs` can be specified.") elif inputs: warnings.warn( ( "The `inputs` parameter is being deprecated. Please use `sources` instead. " "Feast 0.21 and onwards will not support the `inputs` parameter." ), DeprecationWarning, ) if args: warnings.warn( ( "On demand feature view parameters should be specified as keyword arguments " "instead of positional arguments. Feast 0.23 and onwards will not support " "positional arguments in on demand feature view definitions." ), DeprecationWarning, ) if len(args) > len(positional_attributes): raise ValueError( f"Only {', '.join(positional_attributes)} are allowed as positional args " f"when defining feature views, for backwards compatibility." ) if len(args) >= 1: _schema = args[0] # Convert Features to Fields. if len(_schema) > 0 and isinstance(_schema[0], Feature): _schema = [Field.from_feature(feature) for feature in _schema] warnings.warn( ( "The `features` parameter is being deprecated in favor of the `schema` parameter. " "Please switch from using `features` to `schema`. This will also requiring switching " "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not " "support the `features` parameter." ), DeprecationWarning, ) if len(args) >= 2: _sources = args[1] warnings.warn( ( "The `inputs` parameter is being deprecated. Please use `sources` instead. " "Feast 0.21 and onwards will not support the `inputs` parameter." ), DeprecationWarning, ) if not _sources: raise ValueError("The `sources` parameter must be specified.") def decorator(user_function): on_demand_feature_view_obj = OnDemandFeatureView( name=user_function.__name__, sources=_sources, schema=_schema, udf=user_function, description=description, tags=tags, owner=owner, ) functools.update_wrapper( wrapper=on_demand_feature_view_obj, wrapped=user_function ) return on_demand_feature_view_obj return decorator
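For context, here is a hypothetical end-to-end use of the `on_demand_feature_view` decorator defined above. The source and field names (`vals_to_add`, `val_to_add`, `val_doubled`) are made up for illustration, and the constructors are used only in the ways the file above exercises them:

import pandas as pd

from feast.data_source import RequestSource
from feast.field import Field, from_value_type
from feast.on_demand_feature_view import on_demand_feature_view
from feast.value_type import ValueType

# Request data supplied at retrieval time, typed via a list-of-Field schema
# (the List[Field] form is what get_request_data_schema above iterates over).
request_source = RequestSource(
    name="vals_to_add",
    schema=[Field(name="val_to_add", dtype=from_value_type(ValueType.INT64))],
)

@on_demand_feature_view(
    sources={"vals_to_add": request_source},
    schema=[Field(name="val_doubled", dtype=from_value_type(ValueType.DOUBLE))],
)
def double_the_value(features_df: pd.DataFrame) -> pd.DataFrame:
    # The udf receives the input features as a dataframe and returns the
    # transformed features as a new dataframe.
    out = pd.DataFrame()
    out["val_doubled"] = features_df["val_to_add"] * 2
    return out

# The decorator returns an OnDemandFeatureView named after the function.
assert double_the_value.name == "double_the_value"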
custom_name_func
A custom test name function that ensures tests are batched so that all tests for a given data set run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical order, so put the test case first. An alternative is to right-justify the test number (param_num) with zeroes so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10 tests results in tests running in an order similar to:

test_*.test_scenario_0_*
test_*.test_scenario_10_*
test_*.test_scenario_11_*
...
test_*.test_scenario_19_*
test_*.test_scenario_1_*
test_*.test_scenario_20_*
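The masked implementation is elided below; purely to illustrate the zero-padding alternative the docstring mentions, here is a generic sketch of a `parameterized`-style name function (the three-argument signature is the library's convention for custom name functions; the padding width of 2 is an arbitrary choice):

def zero_padded_name_func(testcase_func, param_num, param):
    # Right-justify the test number so alphabetical order matches numerical
    # order: test_scenario_01_* now sorts before test_scenario_10_*.
    return '%s_%s_%s' % (testcase_func.__name__,
                         str(param_num).rjust(2, '0'),
                         '_'.join(str(a) for a in param.args))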
import ast import csv import logging import math import os from nose_parameterized import parameterized import numpy import SimpleITK as sitk import six from radiomics import getTestCase, imageoperations # Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func logger = logging.getLogger('radiomics.testing') TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2') # MASKED: custom_name_func function (lines 22-51) class RadiomicsTestUtils: """ This utility class reads in and stores the baseline files stored in 'data\baseline' (one per feature class) It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated by the test. """ def __init__(self): self._logger = logging.getLogger('radiomics.testing.utils') self._logger.debug('RadiomicsTestUtils') # the image and mask volumes self._image = None self._mask = None self._current_image = None self._current_mask = None self._bb = None self._imageType = None # set up file paths self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data") self._baselineDir = os.path.join(self._dataDir, 'baseline') self._tests = set() self._test = None # Test, specifies an image and mask and some configuration (settings) self._testCase = None # Test image and mask to use in configured test self._testedSet = set() self._baseline = {} self.readBaselineFiles() self._current_config = {} self._featureClassName = None self._results = {} self._diffs = {} for test in self.getTests(): self._results[test] = {} self._diffs[test] = {} def readBaselineFiles(self): """ Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files. These files should therefore be named as follows: 'baseline_<className>.csv'. """ baselineFiles = [fileName for fileName in os.listdir(self._baselineDir) if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')] assert len(baselineFiles) > 0 for baselineFile in baselineFiles: newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile)) cls = newBaseline.cls self._logger.debug('Read baseline for class %s', cls) self._baseline[cls] = newBaseline self._tests |= newBaseline.tests def getTests(self): """ Return all the tests for which there are baseline information. """ return self._tests def getFeatureNames(self, className, test): """ Gets all features for which a baseline value is available for the current class and test case. Returns a list containing the feature names (without image type and feature class specifiers, i.e. just the feature name). """ if className not in self._baseline: return None # No baseline available for specified class return self._baseline[className].getTestFeatures(test) def setFeatureClassAndTestCase(self, className, test): """ Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case are not recognized. These have to be set here together, as the settings with which the test case has to be loaded are defined per feature class in the baseline (extracted from provenance information). Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test settings. If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature class or test case is changed, function returns True. 
""" global TEST_CASES if self._featureClassName == className and self._test == test: return False self._test = test self._testedSet.add(self._test) # First set featureClass if necessary, because if settings have changed, testCase needs te be reloaded if self._featureClassName != className: self._logger.debug('Setting feature class name to %s', className) assert className in self._baseline.keys() # Check if a baseline has been read for this class self._featureClassName = className # Check if test settings have changed if self._current_config != self._baseline[className].getTestConfig(test): self._current_config = self._baseline[className].getTestConfig(test) self._testCase = None # forces image to be reloaded (as settings have changed) # Next, set testCase if necessary if self._testCase != self._current_config['TestCase']: self._testCase = self._current_config['TestCase'] self._logger.info("Reading the image and mask for test case %s", self._testCase) assert self._current_config['TestCase'] in TEST_CASES imageName, maskName = getTestCase(self._testCase) assert imageName is not None assert maskName is not None self._image = sitk.ReadImage(imageName) self._mask = sitk.ReadImage(maskName) if 'ImageHash' in self._current_config: assert sitk.Hash(self._image) == self._current_config['ImageHash'] if 'MaskHash' in self._current_config: assert sitk.Hash(self._mask) == self._current_config['MaskHash'] settings = self._current_config.get('Settings', {}) interpolator = settings.get('interpolator', sitk.sitkBSpline) resampledPixelSpacing = settings.get('resampledPixelSpacing', None) if interpolator is not None and resampledPixelSpacing is not None: self._image, self._mask = imageoperations.resampleImage(self._image, self._mask, resampledPixelSpacing, interpolator, settings.get('label', 1), settings.get('padDistance', 5)) self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings) if correctedMask is not None: self._mask = correctedMask self._imageType = None return True def getImage(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_image def getMask(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_mask def _applyFilter(self, imageType): if imageType == 'original': self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb) else: raise NotImplementedError() self._imageType = imageType def getSettings(self): return self._current_config.get('Settings', {}) def checkResult(self, featureName, value): """ Use utility methods to get and test the results against the expected baseline value for this key. 
""" longName = '_'.join(featureName) if value is None: self._diffs[self._test][longName] = None self._results[self._test][longName] = None assert (value is not None) if math.isnan(value): self._diffs[self._test][longName] = numpy.nan self._results[self._test][longName] = numpy.nan assert (not math.isnan(value)) # save the result using the baseline class and feature names self._logger.debug('checkResults: featureName = %s', featureName) self._results[self._test][longName] = value baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName) assert baselineValue is not None baselineValue = float(baselineValue) self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue) if baselineValue == 0.0: # avoid divide by zero, the difference is either 0% if the value is also zero, or 100% if value - baselineValue == 0.0: percentDiff = 0.0 else: percentDiff = 1.0 else: percentDiff = abs(1.0 - (value / baselineValue)) # save the difference self._diffs[self._test][longName] = percentDiff # check for a less than three percent difference if (percentDiff >= 0.03): self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName, float(baselineValue), value, percentDiff * 100) assert (percentDiff < 0.03) def getResults(self): return self._results def getDiffs(self): return self._diffs def getDataDir(self): return self._dataDir def writeCSV(self, data, fileName): """ Write out data in a csv file. Assumes a data structure with: {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}} """ # Get the headers from the first testCase in _testedSet # If no tests were run, the length of _testedSet will be 0, and no files should be written if len(self._testedSet) > 0: with open(fileName, 'w') as csvFile: csvFileWriter = csv.writer(csvFile, lineterminator='\n') testedCases = sorted(self._testedSet) header = sorted(data[testedCases[0]].keys()) header = ['testCase'] + header csvFileWriter.writerow(header) for testCase in testedCases: thisCase = data[testCase] thisCase['testCase'] = testCase row = [] for h in header: row = row + [thisCase.get(h, "N/A")] csvFileWriter.writerow(row) self._logger.info('Wrote to file %s', fileName) else: self._logger.info('No test cases run, aborting file write to %s', fileName) class PyRadiomicsBaseline: def __init__(self, featureClassName): self.logger = logging.getLogger('radiomics.testing.baseline') self.cls = featureClassName self.configuration = {} self.baseline = {} self.tests = set() @classmethod def readBaselineFile(cls, baselineFile): featureClassName = os.path.basename(baselineFile)[9:-4] new_baseline = cls(featureClassName) new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls) with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader: csvReader = csv.reader(baselineReader) tests = six.next(csvReader)[1:] for case in tests: new_baseline.configuration[case] = {} new_baseline.baseline[case] = {} for testRow in csvReader: for case_idx, case in enumerate(tests, start=1): if 'general_info' in testRow[0]: new_baseline.configuration[case][testRow[0]] = testRow[case_idx] else: new_baseline.baseline[case][testRow[0]] = testRow[case_idx] new_baseline.tests = set(tests) return new_baseline def getTestConfig(self, test): if test not in self.configuration: return {} # This test is not present in the baseline for this class config = { 'TestCase': self.configuration[test].get('general_info_TestCase', None), 'Settings': 
ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')), } if 'general_info_ImageHash' in self.configuration[test]: config['ImageHash'] = self.configuration[test]['general_info_ImageHash'] if 'general_info_MaskHash' in self.configuration[test]: config['MaskHash'] = self.configuration[test]['general_info_MaskHash'] if config['TestCase'] is None: self.logger.error('Missing key "general_info_TestCase". Cannot configure!') return None return config def getTestFeatures(self, test): """ Gets all features for which a baseline value is available for the current class and test case. Returns a list containing the feature names. """ if test not in self.baseline: return None # This test is not present in the baseline for this class return list(self.baseline[test].keys()) def getBaselineValue(self, test, featureName): if test not in self.baseline: return None return self.baseline[test].get(featureName, None) def writeBaselineFile(self, baselineDir): baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls) testCases = list(self.baseline.keys()) with open(baselineFile, 'wb') as baseline: csvWriter = csv.writer(baseline) header = ['featureName'] + testCases csvWriter.writerow(header) config = self.configuration[testCases[0]].keys() for c in config: row = [c] for testCase in testCases: row.append(str(self.configuration[testCase].get(c, ''))) csvWriter.writerow(row) features = self.baseline[testCases[0]].keys() for f in features: row = [f] for testCase in testCases: row.append(str(self.baseline[testCase].get(f, ''))) csvWriter.writerow(row)
def custom_name_func(testcase_func, param_num, param):
  """
  A custom test name function that ensures tests are batched so that all tests for a given data set
  are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical
  order, so put the test case first. An alternate option is to right-justify the test number
  (param_num) with zeroes so that the numerical and alphabetical orders are the same. Not providing
  this method when there are more than 10 tests results in tests running in an order similar to:

  test_*.test_scenario_0_*
  test_*.test_scenario_10_*
  test_*.test_scenario_11_*
  ...
  test_*.test_scenario_19_*
  test_*.test_scenario_1_*
  test_*.test_scenario_20_*
  """
  global logger

  # str.format fills in only the zero-padded param_num; the %s placeholders are left intact for the
  # logging module to substitute lazily with testcase_func.__name__ and param.args.
  logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num),
               testcase_func.__name__, param.args)
  return str("%s_%s" % (
    testcase_func.__name__,
    parameterized.to_safe_name("_".join(str(x) for x in param.args)),
  ))
22
51
import ast import csv import logging import math import os from nose_parameterized import parameterized import numpy import SimpleITK as sitk import six from radiomics import getTestCase, imageoperations # Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func logger = logging.getLogger('radiomics.testing') TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2') def custom_name_func(testcase_func, param_num, param): """ A custom test name function that will ensure that the tests are run such that they're batched with all tests for a given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10 tests results in tests running in an order similar to: test_*.test_scenario_0_* test_*.test_scenario_10_* test_*.test_scenario_11_* ... test_*.test_scenario_19_* test_*.test_scenario_1_* test_*.test_scenario_20_* """ global logger logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num), testcase_func.__name__, param.args) return str("%s_%s" % ( testcase_func.__name__, parameterized.to_safe_name("_".join(str(x) for x in param.args)), )) class RadiomicsTestUtils: """ This utility class reads in and stores the baseline files stored in 'data\baseline' (one per feature class) It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated by the test. """ def __init__(self): self._logger = logging.getLogger('radiomics.testing.utils') self._logger.debug('RadiomicsTestUtils') # the image and mask volumes self._image = None self._mask = None self._current_image = None self._current_mask = None self._bb = None self._imageType = None # set up file paths self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data") self._baselineDir = os.path.join(self._dataDir, 'baseline') self._tests = set() self._test = None # Test, specifies an image and mask and some configuration (settings) self._testCase = None # Test image and mask to use in configured test self._testedSet = set() self._baseline = {} self.readBaselineFiles() self._current_config = {} self._featureClassName = None self._results = {} self._diffs = {} for test in self.getTests(): self._results[test] = {} self._diffs[test] = {} def readBaselineFiles(self): """ Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files. These files should therefore be named as follows: 'baseline_<className>.csv'. """ baselineFiles = [fileName for fileName in os.listdir(self._baselineDir) if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')] assert len(baselineFiles) > 0 for baselineFile in baselineFiles: newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile)) cls = newBaseline.cls self._logger.debug('Read baseline for class %s', cls) self._baseline[cls] = newBaseline self._tests |= newBaseline.tests def getTests(self): """ Return all the tests for which there are baseline information. """ return self._tests def getFeatureNames(self, className, test): """ Gets all features for which a baseline value is available for the current class and test case. 
Returns a list containing the feature names (without image type and feature class specifiers, i.e. just the feature name). """ if className not in self._baseline: return None # No baseline available for specified class return self._baseline[className].getTestFeatures(test) def setFeatureClassAndTestCase(self, className, test): """ Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case are not recognized. These have to be set here together, as the settings with which the test case has to be loaded are defined per feature class in the baseline (extracted from provenance information). Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test settings. If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature class or test case is changed, function returns True. """ global TEST_CASES if self._featureClassName == className and self._test == test: return False self._test = test self._testedSet.add(self._test) # First set featureClass if necessary, because if settings have changed, testCase needs te be reloaded if self._featureClassName != className: self._logger.debug('Setting feature class name to %s', className) assert className in self._baseline.keys() # Check if a baseline has been read for this class self._featureClassName = className # Check if test settings have changed if self._current_config != self._baseline[className].getTestConfig(test): self._current_config = self._baseline[className].getTestConfig(test) self._testCase = None # forces image to be reloaded (as settings have changed) # Next, set testCase if necessary if self._testCase != self._current_config['TestCase']: self._testCase = self._current_config['TestCase'] self._logger.info("Reading the image and mask for test case %s", self._testCase) assert self._current_config['TestCase'] in TEST_CASES imageName, maskName = getTestCase(self._testCase) assert imageName is not None assert maskName is not None self._image = sitk.ReadImage(imageName) self._mask = sitk.ReadImage(maskName) if 'ImageHash' in self._current_config: assert sitk.Hash(self._image) == self._current_config['ImageHash'] if 'MaskHash' in self._current_config: assert sitk.Hash(self._mask) == self._current_config['MaskHash'] settings = self._current_config.get('Settings', {}) interpolator = settings.get('interpolator', sitk.sitkBSpline) resampledPixelSpacing = settings.get('resampledPixelSpacing', None) if interpolator is not None and resampledPixelSpacing is not None: self._image, self._mask = imageoperations.resampleImage(self._image, self._mask, resampledPixelSpacing, interpolator, settings.get('label', 1), settings.get('padDistance', 5)) self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings) if correctedMask is not None: self._mask = correctedMask self._imageType = None return True def getImage(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_image def getMask(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_mask def _applyFilter(self, imageType): if imageType == 'original': self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb) else: raise NotImplementedError() self._imageType = imageType def getSettings(self): return self._current_config.get('Settings', {}) def checkResult(self, featureName, value): """ Use 
utility methods to get and test the results against the expected baseline value for this key. """ longName = '_'.join(featureName) if value is None: self._diffs[self._test][longName] = None self._results[self._test][longName] = None assert (value is not None) if math.isnan(value): self._diffs[self._test][longName] = numpy.nan self._results[self._test][longName] = numpy.nan assert (not math.isnan(value)) # save the result using the baseline class and feature names self._logger.debug('checkResults: featureName = %s', featureName) self._results[self._test][longName] = value baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName) assert baselineValue is not None baselineValue = float(baselineValue) self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue) if baselineValue == 0.0: # avoid divide by zero, the difference is either 0% if the value is also zero, or 100% if value - baselineValue == 0.0: percentDiff = 0.0 else: percentDiff = 1.0 else: percentDiff = abs(1.0 - (value / baselineValue)) # save the difference self._diffs[self._test][longName] = percentDiff # check for a less than three percent difference if (percentDiff >= 0.03): self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName, float(baselineValue), value, percentDiff * 100) assert (percentDiff < 0.03) def getResults(self): return self._results def getDiffs(self): return self._diffs def getDataDir(self): return self._dataDir def writeCSV(self, data, fileName): """ Write out data in a csv file. Assumes a data structure with: {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}} """ # Get the headers from the first testCase in _testedSet # If no tests were run, the length of _testedSet will be 0, and no files should be written if len(self._testedSet) > 0: with open(fileName, 'w') as csvFile: csvFileWriter = csv.writer(csvFile, lineterminator='\n') testedCases = sorted(self._testedSet) header = sorted(data[testedCases[0]].keys()) header = ['testCase'] + header csvFileWriter.writerow(header) for testCase in testedCases: thisCase = data[testCase] thisCase['testCase'] = testCase row = [] for h in header: row = row + [thisCase.get(h, "N/A")] csvFileWriter.writerow(row) self._logger.info('Wrote to file %s', fileName) else: self._logger.info('No test cases run, aborting file write to %s', fileName) class PyRadiomicsBaseline: def __init__(self, featureClassName): self.logger = logging.getLogger('radiomics.testing.baseline') self.cls = featureClassName self.configuration = {} self.baseline = {} self.tests = set() @classmethod def readBaselineFile(cls, baselineFile): featureClassName = os.path.basename(baselineFile)[9:-4] new_baseline = cls(featureClassName) new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls) with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader: csvReader = csv.reader(baselineReader) tests = six.next(csvReader)[1:] for case in tests: new_baseline.configuration[case] = {} new_baseline.baseline[case] = {} for testRow in csvReader: for case_idx, case in enumerate(tests, start=1): if 'general_info' in testRow[0]: new_baseline.configuration[case][testRow[0]] = testRow[case_idx] else: new_baseline.baseline[case][testRow[0]] = testRow[case_idx] new_baseline.tests = set(tests) return new_baseline def getTestConfig(self, test): if test not in self.configuration: return {} # This test is not present in the baseline for this class config = { 'TestCase': 
self.configuration[test].get('general_info_TestCase', None), 'Settings': ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')), } if 'general_info_ImageHash' in self.configuration[test]: config['ImageHash'] = self.configuration[test]['general_info_ImageHash'] if 'general_info_MaskHash' in self.configuration[test]: config['MaskHash'] = self.configuration[test]['general_info_MaskHash'] if config['TestCase'] is None: self.logger.error('Missing key "general_info_TestCase". Cannot configure!') return None return config def getTestFeatures(self, test): """ Gets all features for which a baseline value is available for the current class and test case. Returns a list containing the feature names. """ if test not in self.baseline: return None # This test is not present in the baseline for this class return list(self.baseline[test].keys()) def getBaselineValue(self, test, featureName): if test not in self.baseline: return None return self.baseline[test].get(featureName, None) def writeBaselineFile(self, baselineDir): baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls) testCases = list(self.baseline.keys()) with open(baselineFile, 'wb') as baseline: csvWriter = csv.writer(baseline) header = ['featureName'] + testCases csvWriter.writerow(header) config = self.configuration[testCases[0]].keys() for c in config: row = [c] for testCase in testCases: row.append(str(self.configuration[testCase].get(c, ''))) csvWriter.writerow(row) features = self.baseline[testCases[0]].keys() for f in features: row = [f] for testCase in testCases: row.append(str(self.baseline[testCase].get(f, ''))) csvWriter.writerow(row)
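As a usage sketch (not part of the dataset row itself): custom_name_func is meant to be handed to nose_parameterized's expand decorator. The test class and parameter tuples below are hypothetical, and the keyword spelling is an assumption — newer releases of the library use name_func, while older nose_parameterized versions spelled it testcase_func_name:

from nose_parameterized import parameterized

# Hypothetical parameter tuples; putting the test case first makes the generated
# names sort grouped per data set, e.g. test_scenario_brain1_firstorder,
# test_scenario_brain1_glcm, test_scenario_brain2_firstorder, ...
scenarios = [
  ('brain1', 'firstorder'),
  ('brain1', 'glcm'),
  ('brain2', 'firstorder'),
]

class TestScenarios(object):
  @parameterized.expand(scenarios, name_func=custom_name_func)
  def test_scenario(self, test, featureClassName):
    # All 'brain1' scenarios now run back-to-back, so RadiomicsTestUtils only
    # reloads the image and mask when the test case actually changes.
    pass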
readBaselineFiles
Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files. These files should therefore be named as follows: 'baseline_<className>.csv'.
import ast import csv import logging import math import os from nose_parameterized import parameterized import numpy import SimpleITK as sitk import six from radiomics import getTestCase, imageoperations # Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func logger = logging.getLogger('radiomics.testing') TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2') def custom_name_func(testcase_func, param_num, param): """ A custom test name function that will ensure that the tests are run such that they're batched with all tests for a given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10 tests results in tests running in an order similar to: test_*.test_scenario_0_* test_*.test_scenario_10_* test_*.test_scenario_11_* ... test_*.test_scenario_19_* test_*.test_scenario_1_* test_*.test_scenario_20_* """ global logger logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num), testcase_func.__name__, param.args) return str("%s_%s" % ( testcase_func.__name__, parameterized.to_safe_name("_".join(str(x) for x in param.args)), )) class RadiomicsTestUtils: """ This utility class reads in and stores the baseline files stored in 'data\baseline' (one per feature class) It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated by the test. """ def __init__(self): self._logger = logging.getLogger('radiomics.testing.utils') self._logger.debug('RadiomicsTestUtils') # the image and mask volumes self._image = None self._mask = None self._current_image = None self._current_mask = None self._bb = None self._imageType = None # set up file paths self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data") self._baselineDir = os.path.join(self._dataDir, 'baseline') self._tests = set() self._test = None # Test, specifies an image and mask and some configuration (settings) self._testCase = None # Test image and mask to use in configured test self._testedSet = set() self._baseline = {} self.readBaselineFiles() self._current_config = {} self._featureClassName = None self._results = {} self._diffs = {} for test in self.getTests(): self._results[test] = {} self._diffs[test] = {} # MASKED: readBaselineFiles function (lines 95-109) def getTests(self): """ Return all the tests for which there are baseline information. """ return self._tests def getFeatureNames(self, className, test): """ Gets all features for which a baseline value is available for the current class and test case. Returns a list containing the feature names (without image type and feature class specifiers, i.e. just the feature name). """ if className not in self._baseline: return None # No baseline available for specified class return self._baseline[className].getTestFeatures(test) def setFeatureClassAndTestCase(self, className, test): """ Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case are not recognized. These have to be set here together, as the settings with which the test case has to be loaded are defined per feature class in the baseline (extracted from provenance information). 
Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test settings. If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature class or test case is changed, function returns True. """ global TEST_CASES if self._featureClassName == className and self._test == test: return False self._test = test self._testedSet.add(self._test) # First set featureClass if necessary, because if settings have changed, testCase needs te be reloaded if self._featureClassName != className: self._logger.debug('Setting feature class name to %s', className) assert className in self._baseline.keys() # Check if a baseline has been read for this class self._featureClassName = className # Check if test settings have changed if self._current_config != self._baseline[className].getTestConfig(test): self._current_config = self._baseline[className].getTestConfig(test) self._testCase = None # forces image to be reloaded (as settings have changed) # Next, set testCase if necessary if self._testCase != self._current_config['TestCase']: self._testCase = self._current_config['TestCase'] self._logger.info("Reading the image and mask for test case %s", self._testCase) assert self._current_config['TestCase'] in TEST_CASES imageName, maskName = getTestCase(self._testCase) assert imageName is not None assert maskName is not None self._image = sitk.ReadImage(imageName) self._mask = sitk.ReadImage(maskName) if 'ImageHash' in self._current_config: assert sitk.Hash(self._image) == self._current_config['ImageHash'] if 'MaskHash' in self._current_config: assert sitk.Hash(self._mask) == self._current_config['MaskHash'] settings = self._current_config.get('Settings', {}) interpolator = settings.get('interpolator', sitk.sitkBSpline) resampledPixelSpacing = settings.get('resampledPixelSpacing', None) if interpolator is not None and resampledPixelSpacing is not None: self._image, self._mask = imageoperations.resampleImage(self._image, self._mask, resampledPixelSpacing, interpolator, settings.get('label', 1), settings.get('padDistance', 5)) self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings) if correctedMask is not None: self._mask = correctedMask self._imageType = None return True def getImage(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_image def getMask(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_mask def _applyFilter(self, imageType): if imageType == 'original': self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb) else: raise NotImplementedError() self._imageType = imageType def getSettings(self): return self._current_config.get('Settings', {}) def checkResult(self, featureName, value): """ Use utility methods to get and test the results against the expected baseline value for this key. 
""" longName = '_'.join(featureName) if value is None: self._diffs[self._test][longName] = None self._results[self._test][longName] = None assert (value is not None) if math.isnan(value): self._diffs[self._test][longName] = numpy.nan self._results[self._test][longName] = numpy.nan assert (not math.isnan(value)) # save the result using the baseline class and feature names self._logger.debug('checkResults: featureName = %s', featureName) self._results[self._test][longName] = value baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName) assert baselineValue is not None baselineValue = float(baselineValue) self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue) if baselineValue == 0.0: # avoid divide by zero, the difference is either 0% if the value is also zero, or 100% if value - baselineValue == 0.0: percentDiff = 0.0 else: percentDiff = 1.0 else: percentDiff = abs(1.0 - (value / baselineValue)) # save the difference self._diffs[self._test][longName] = percentDiff # check for a less than three percent difference if (percentDiff >= 0.03): self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName, float(baselineValue), value, percentDiff * 100) assert (percentDiff < 0.03) def getResults(self): return self._results def getDiffs(self): return self._diffs def getDataDir(self): return self._dataDir def writeCSV(self, data, fileName): """ Write out data in a csv file. Assumes a data structure with: {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}} """ # Get the headers from the first testCase in _testedSet # If no tests were run, the length of _testedSet will be 0, and no files should be written if len(self._testedSet) > 0: with open(fileName, 'w') as csvFile: csvFileWriter = csv.writer(csvFile, lineterminator='\n') testedCases = sorted(self._testedSet) header = sorted(data[testedCases[0]].keys()) header = ['testCase'] + header csvFileWriter.writerow(header) for testCase in testedCases: thisCase = data[testCase] thisCase['testCase'] = testCase row = [] for h in header: row = row + [thisCase.get(h, "N/A")] csvFileWriter.writerow(row) self._logger.info('Wrote to file %s', fileName) else: self._logger.info('No test cases run, aborting file write to %s', fileName) class PyRadiomicsBaseline: def __init__(self, featureClassName): self.logger = logging.getLogger('radiomics.testing.baseline') self.cls = featureClassName self.configuration = {} self.baseline = {} self.tests = set() @classmethod def readBaselineFile(cls, baselineFile): featureClassName = os.path.basename(baselineFile)[9:-4] new_baseline = cls(featureClassName) new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls) with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader: csvReader = csv.reader(baselineReader) tests = six.next(csvReader)[1:] for case in tests: new_baseline.configuration[case] = {} new_baseline.baseline[case] = {} for testRow in csvReader: for case_idx, case in enumerate(tests, start=1): if 'general_info' in testRow[0]: new_baseline.configuration[case][testRow[0]] = testRow[case_idx] else: new_baseline.baseline[case][testRow[0]] = testRow[case_idx] new_baseline.tests = set(tests) return new_baseline def getTestConfig(self, test): if test not in self.configuration: return {} # This test is not present in the baseline for this class config = { 'TestCase': self.configuration[test].get('general_info_TestCase', None), 'Settings': 
ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')), } if 'general_info_ImageHash' in self.configuration[test]: config['ImageHash'] = self.configuration[test]['general_info_ImageHash'] if 'general_info_MaskHash' in self.configuration[test]: config['MaskHash'] = self.configuration[test]['general_info_MaskHash'] if config['TestCase'] is None: self.logger.error('Missing key "general_info_TestCase". Cannot configure!') return None return config def getTestFeatures(self, test): """ Gets all features for which a baseline value is available for the current class and test case. Returns a list containing the feature names. """ if test not in self.baseline: return None # This test is not present in the baseline for this class return list(self.baseline[test].keys()) def getBaselineValue(self, test, featureName): if test not in self.baseline: return None return self.baseline[test].get(featureName, None) def writeBaselineFile(self, baselineDir): baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls) testCases = list(self.baseline.keys()) with open(baselineFile, 'wb') as baseline: csvWriter = csv.writer(baseline) header = ['featureName'] + testCases csvWriter.writerow(header) config = self.configuration[testCases[0]].keys() for c in config: row = [c] for testCase in testCases: row.append(str(self.configuration[testCase].get(c, ''))) csvWriter.writerow(row) features = self.baseline[testCases[0]].keys() for f in features: row = [f] for testCase in testCases: row.append(str(self.baseline[testCase].get(f, ''))) csvWriter.writerow(row)
def readBaselineFiles(self):
  """
  Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files.
  These files should therefore be named as follows: 'baseline_<className>.csv'.
  """
  baselineFiles = [fileName for fileName in os.listdir(self._baselineDir)
                   if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')]
  assert len(baselineFiles) > 0
  for baselineFile in baselineFiles:
    newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile))

    cls = newBaseline.cls
    self._logger.debug('Read baseline for class %s', cls)
    self._baseline[cls] = newBaseline
    self._tests |= newBaseline.tests
95
109
import ast import csv import logging import math import os from nose_parameterized import parameterized import numpy import SimpleITK as sitk import six from radiomics import getTestCase, imageoperations # Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func logger = logging.getLogger('radiomics.testing') TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2') def custom_name_func(testcase_func, param_num, param): """ A custom test name function that will ensure that the tests are run such that they're batched with all tests for a given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10 tests results in tests running in an order similar to: test_*.test_scenario_0_* test_*.test_scenario_10_* test_*.test_scenario_11_* ... test_*.test_scenario_19_* test_*.test_scenario_1_* test_*.test_scenario_20_* """ global logger logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num), testcase_func.__name__, param.args) return str("%s_%s" % ( testcase_func.__name__, parameterized.to_safe_name("_".join(str(x) for x in param.args)), )) class RadiomicsTestUtils: """ This utility class reads in and stores the baseline files stored in 'data\baseline' (one per feature class) It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated by the test. """ def __init__(self): self._logger = logging.getLogger('radiomics.testing.utils') self._logger.debug('RadiomicsTestUtils') # the image and mask volumes self._image = None self._mask = None self._current_image = None self._current_mask = None self._bb = None self._imageType = None # set up file paths self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data") self._baselineDir = os.path.join(self._dataDir, 'baseline') self._tests = set() self._test = None # Test, specifies an image and mask and some configuration (settings) self._testCase = None # Test image and mask to use in configured test self._testedSet = set() self._baseline = {} self.readBaselineFiles() self._current_config = {} self._featureClassName = None self._results = {} self._diffs = {} for test in self.getTests(): self._results[test] = {} self._diffs[test] = {} def readBaselineFiles(self): """ Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files. These files should therefore be named as follows: 'baseline_<className>.csv'. """ baselineFiles = [fileName for fileName in os.listdir(self._baselineDir) if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')] assert len(baselineFiles) > 0 for baselineFile in baselineFiles: newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile)) cls = newBaseline.cls self._logger.debug('Read baseline for class %s', cls) self._baseline[cls] = newBaseline self._tests |= newBaseline.tests def getTests(self): """ Return all the tests for which there are baseline information. """ return self._tests def getFeatureNames(self, className, test): """ Gets all features for which a baseline value is available for the current class and test case. 
Returns a list containing the feature names (without image type and feature class specifiers, i.e. just the feature name). """ if className not in self._baseline: return None # No baseline available for specified class return self._baseline[className].getTestFeatures(test) def setFeatureClassAndTestCase(self, className, test): """ Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case are not recognized. These have to be set here together, as the settings with which the test case has to be loaded are defined per feature class in the baseline (extracted from provenance information). Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test settings. If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature class or test case is changed, function returns True. """ global TEST_CASES if self._featureClassName == className and self._test == test: return False self._test = test self._testedSet.add(self._test) # First set featureClass if necessary, because if settings have changed, testCase needs te be reloaded if self._featureClassName != className: self._logger.debug('Setting feature class name to %s', className) assert className in self._baseline.keys() # Check if a baseline has been read for this class self._featureClassName = className # Check if test settings have changed if self._current_config != self._baseline[className].getTestConfig(test): self._current_config = self._baseline[className].getTestConfig(test) self._testCase = None # forces image to be reloaded (as settings have changed) # Next, set testCase if necessary if self._testCase != self._current_config['TestCase']: self._testCase = self._current_config['TestCase'] self._logger.info("Reading the image and mask for test case %s", self._testCase) assert self._current_config['TestCase'] in TEST_CASES imageName, maskName = getTestCase(self._testCase) assert imageName is not None assert maskName is not None self._image = sitk.ReadImage(imageName) self._mask = sitk.ReadImage(maskName) if 'ImageHash' in self._current_config: assert sitk.Hash(self._image) == self._current_config['ImageHash'] if 'MaskHash' in self._current_config: assert sitk.Hash(self._mask) == self._current_config['MaskHash'] settings = self._current_config.get('Settings', {}) interpolator = settings.get('interpolator', sitk.sitkBSpline) resampledPixelSpacing = settings.get('resampledPixelSpacing', None) if interpolator is not None and resampledPixelSpacing is not None: self._image, self._mask = imageoperations.resampleImage(self._image, self._mask, resampledPixelSpacing, interpolator, settings.get('label', 1), settings.get('padDistance', 5)) self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings) if correctedMask is not None: self._mask = correctedMask self._imageType = None return True def getImage(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_image def getMask(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_mask def _applyFilter(self, imageType): if imageType == 'original': self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb) else: raise NotImplementedError() self._imageType = imageType def getSettings(self): return self._current_config.get('Settings', {}) def checkResult(self, featureName, value): """ Use 
utility methods to get and test the results against the expected baseline value for this key. """ longName = '_'.join(featureName) if value is None: self._diffs[self._test][longName] = None self._results[self._test][longName] = None assert (value is not None) if math.isnan(value): self._diffs[self._test][longName] = numpy.nan self._results[self._test][longName] = numpy.nan assert (not math.isnan(value)) # save the result using the baseline class and feature names self._logger.debug('checkResults: featureName = %s', featureName) self._results[self._test][longName] = value baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName) assert baselineValue is not None baselineValue = float(baselineValue) self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue) if baselineValue == 0.0: # avoid divide by zero, the difference is either 0% if the value is also zero, or 100% if value - baselineValue == 0.0: percentDiff = 0.0 else: percentDiff = 1.0 else: percentDiff = abs(1.0 - (value / baselineValue)) # save the difference self._diffs[self._test][longName] = percentDiff # check for a less than three percent difference if (percentDiff >= 0.03): self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName, float(baselineValue), value, percentDiff * 100) assert (percentDiff < 0.03) def getResults(self): return self._results def getDiffs(self): return self._diffs def getDataDir(self): return self._dataDir def writeCSV(self, data, fileName): """ Write out data in a csv file. Assumes a data structure with: {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}} """ # Get the headers from the first testCase in _testedSet # If no tests were run, the length of _testedSet will be 0, and no files should be written if len(self._testedSet) > 0: with open(fileName, 'w') as csvFile: csvFileWriter = csv.writer(csvFile, lineterminator='\n') testedCases = sorted(self._testedSet) header = sorted(data[testedCases[0]].keys()) header = ['testCase'] + header csvFileWriter.writerow(header) for testCase in testedCases: thisCase = data[testCase] thisCase['testCase'] = testCase row = [] for h in header: row = row + [thisCase.get(h, "N/A")] csvFileWriter.writerow(row) self._logger.info('Wrote to file %s', fileName) else: self._logger.info('No test cases run, aborting file write to %s', fileName) class PyRadiomicsBaseline: def __init__(self, featureClassName): self.logger = logging.getLogger('radiomics.testing.baseline') self.cls = featureClassName self.configuration = {} self.baseline = {} self.tests = set() @classmethod def readBaselineFile(cls, baselineFile): featureClassName = os.path.basename(baselineFile)[9:-4] new_baseline = cls(featureClassName) new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls) with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader: csvReader = csv.reader(baselineReader) tests = six.next(csvReader)[1:] for case in tests: new_baseline.configuration[case] = {} new_baseline.baseline[case] = {} for testRow in csvReader: for case_idx, case in enumerate(tests, start=1): if 'general_info' in testRow[0]: new_baseline.configuration[case][testRow[0]] = testRow[case_idx] else: new_baseline.baseline[case][testRow[0]] = testRow[case_idx] new_baseline.tests = set(tests) return new_baseline def getTestConfig(self, test): if test not in self.configuration: return {} # This test is not present in the baseline for this class config = { 'TestCase': 
self.configuration[test].get('general_info_TestCase', None), 'Settings': ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')), } if 'general_info_ImageHash' in self.configuration[test]: config['ImageHash'] = self.configuration[test]['general_info_ImageHash'] if 'general_info_MaskHash' in self.configuration[test]: config['MaskHash'] = self.configuration[test]['general_info_MaskHash'] if config['TestCase'] is None: self.logger.error('Missing key "general_info_TestCase". Cannot configure!') return None return config def getTestFeatures(self, test): """ Gets all features for which a baseline value is available for the current class and test case. Returns a list containing the feature names. """ if test not in self.baseline: return None # This test is not present in the baseline for this class return list(self.baseline[test].keys()) def getBaselineValue(self, test, featureName): if test not in self.baseline: return None return self.baseline[test].get(featureName, None) def writeBaselineFile(self, baselineDir): baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls) testCases = list(self.baseline.keys()) with open(baselineFile, 'wb') as baseline: csvWriter = csv.writer(baseline) header = ['featureName'] + testCases csvWriter.writerow(header) config = self.configuration[testCases[0]].keys() for c in config: row = [c] for testCase in testCases: row.append(str(self.configuration[testCase].get(c, ''))) csvWriter.writerow(row) features = self.baseline[testCases[0]].keys() for f in features: row = [f] for testCase in testCases: row.append(str(self.baseline[testCase].get(f, ''))) csvWriter.writerow(row)
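To make the CSV layout concrete: readBaselineFile (shown in the file content above) treats the first row as a header whose first cell is ignored and whose remaining cells name the tests; every following row holds either a 'general_info_*' configuration key or a feature name, with one value per test column. A minimal runnable sketch — the file name, test names, and feature values are made up for illustration:

import csv
import io

# Hypothetical contents of a baseline_firstorder.csv: one column per test,
# 'general_info_*' rows for configuration, all other rows for feature values.
baseline_csv = (
  'featureName,brain1_test,brain2_test\n'
  'general_info_TestCase,brain1,brain2\n'
  'general_info_GeneralSettings,{},{}\n'
  'Mean,385.4,602.1\n'
  'Entropy,4.6,5.2\n'
)

reader = csv.reader(io.StringIO(baseline_csv))
tests = next(reader)[1:]  # ['brain1_test', 'brain2_test'], first cell is skipped
for row in reader:
  # Mirrors readBaselineFile: 'general_info' rows go to configuration,
  # everything else is stored as a per-test baseline feature value.
  kind = 'config' if 'general_info' in row[0] else 'feature'
  print(kind, row[0], dict(zip(tests, row[1:])))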
getFeatureNames
Gets all features for which a baseline value is available for the current class and test case. Returns a list containing the feature names (without image type and feature class specifiers, i.e. just the feature name).
import ast import csv import logging import math import os from nose_parameterized import parameterized import numpy import SimpleITK as sitk import six from radiomics import getTestCase, imageoperations # Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func logger = logging.getLogger('radiomics.testing') TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2') def custom_name_func(testcase_func, param_num, param): """ A custom test name function that will ensure that the tests are run such that they're batched with all tests for a given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10 tests results in tests running in an order similar to: test_*.test_scenario_0_* test_*.test_scenario_10_* test_*.test_scenario_11_* ... test_*.test_scenario_19_* test_*.test_scenario_1_* test_*.test_scenario_20_* """ global logger logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num), testcase_func.__name__, param.args) return str("%s_%s" % ( testcase_func.__name__, parameterized.to_safe_name("_".join(str(x) for x in param.args)), )) class RadiomicsTestUtils: """ This utility class reads in and stores the baseline files stored in 'data\baseline' (one per feature class) It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated by the test. """ def __init__(self): self._logger = logging.getLogger('radiomics.testing.utils') self._logger.debug('RadiomicsTestUtils') # the image and mask volumes self._image = None self._mask = None self._current_image = None self._current_mask = None self._bb = None self._imageType = None # set up file paths self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data") self._baselineDir = os.path.join(self._dataDir, 'baseline') self._tests = set() self._test = None # Test, specifies an image and mask and some configuration (settings) self._testCase = None # Test image and mask to use in configured test self._testedSet = set() self._baseline = {} self.readBaselineFiles() self._current_config = {} self._featureClassName = None self._results = {} self._diffs = {} for test in self.getTests(): self._results[test] = {} self._diffs[test] = {} def readBaselineFiles(self): """ Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files. These files should therefore be named as follows: 'baseline_<className>.csv'. """ baselineFiles = [fileName for fileName in os.listdir(self._baselineDir) if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')] assert len(baselineFiles) > 0 for baselineFile in baselineFiles: newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile)) cls = newBaseline.cls self._logger.debug('Read baseline for class %s', cls) self._baseline[cls] = newBaseline self._tests |= newBaseline.tests def getTests(self): """ Return all the tests for which there are baseline information. """ return self._tests # MASKED: getFeatureNames function (lines 117-124) def setFeatureClassAndTestCase(self, className, test): """ Set testing suite to specified testCase and feature class. 
Throws an assertion error if either class or test case are not recognized. These have to be set here together, as the settings with which the test case has to be loaded are defined per feature class in the baseline (extracted from provenance information). Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test settings. If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature class or test case is changed, function returns True. """ global TEST_CASES if self._featureClassName == className and self._test == test: return False self._test = test self._testedSet.add(self._test) # First set featureClass if necessary, because if settings have changed, testCase needs te be reloaded if self._featureClassName != className: self._logger.debug('Setting feature class name to %s', className) assert className in self._baseline.keys() # Check if a baseline has been read for this class self._featureClassName = className # Check if test settings have changed if self._current_config != self._baseline[className].getTestConfig(test): self._current_config = self._baseline[className].getTestConfig(test) self._testCase = None # forces image to be reloaded (as settings have changed) # Next, set testCase if necessary if self._testCase != self._current_config['TestCase']: self._testCase = self._current_config['TestCase'] self._logger.info("Reading the image and mask for test case %s", self._testCase) assert self._current_config['TestCase'] in TEST_CASES imageName, maskName = getTestCase(self._testCase) assert imageName is not None assert maskName is not None self._image = sitk.ReadImage(imageName) self._mask = sitk.ReadImage(maskName) if 'ImageHash' in self._current_config: assert sitk.Hash(self._image) == self._current_config['ImageHash'] if 'MaskHash' in self._current_config: assert sitk.Hash(self._mask) == self._current_config['MaskHash'] settings = self._current_config.get('Settings', {}) interpolator = settings.get('interpolator', sitk.sitkBSpline) resampledPixelSpacing = settings.get('resampledPixelSpacing', None) if interpolator is not None and resampledPixelSpacing is not None: self._image, self._mask = imageoperations.resampleImage(self._image, self._mask, resampledPixelSpacing, interpolator, settings.get('label', 1), settings.get('padDistance', 5)) self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings) if correctedMask is not None: self._mask = correctedMask self._imageType = None return True def getImage(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_image def getMask(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_mask def _applyFilter(self, imageType): if imageType == 'original': self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb) else: raise NotImplementedError() self._imageType = imageType def getSettings(self): return self._current_config.get('Settings', {}) def checkResult(self, featureName, value): """ Use utility methods to get and test the results against the expected baseline value for this key. 
""" longName = '_'.join(featureName) if value is None: self._diffs[self._test][longName] = None self._results[self._test][longName] = None assert (value is not None) if math.isnan(value): self._diffs[self._test][longName] = numpy.nan self._results[self._test][longName] = numpy.nan assert (not math.isnan(value)) # save the result using the baseline class and feature names self._logger.debug('checkResults: featureName = %s', featureName) self._results[self._test][longName] = value baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName) assert baselineValue is not None baselineValue = float(baselineValue) self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue) if baselineValue == 0.0: # avoid divide by zero, the difference is either 0% if the value is also zero, or 100% if value - baselineValue == 0.0: percentDiff = 0.0 else: percentDiff = 1.0 else: percentDiff = abs(1.0 - (value / baselineValue)) # save the difference self._diffs[self._test][longName] = percentDiff # check for a less than three percent difference if (percentDiff >= 0.03): self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName, float(baselineValue), value, percentDiff * 100) assert (percentDiff < 0.03) def getResults(self): return self._results def getDiffs(self): return self._diffs def getDataDir(self): return self._dataDir def writeCSV(self, data, fileName): """ Write out data in a csv file. Assumes a data structure with: {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}} """ # Get the headers from the first testCase in _testedSet # If no tests were run, the length of _testedSet will be 0, and no files should be written if len(self._testedSet) > 0: with open(fileName, 'w') as csvFile: csvFileWriter = csv.writer(csvFile, lineterminator='\n') testedCases = sorted(self._testedSet) header = sorted(data[testedCases[0]].keys()) header = ['testCase'] + header csvFileWriter.writerow(header) for testCase in testedCases: thisCase = data[testCase] thisCase['testCase'] = testCase row = [] for h in header: row = row + [thisCase.get(h, "N/A")] csvFileWriter.writerow(row) self._logger.info('Wrote to file %s', fileName) else: self._logger.info('No test cases run, aborting file write to %s', fileName) class PyRadiomicsBaseline: def __init__(self, featureClassName): self.logger = logging.getLogger('radiomics.testing.baseline') self.cls = featureClassName self.configuration = {} self.baseline = {} self.tests = set() @classmethod def readBaselineFile(cls, baselineFile): featureClassName = os.path.basename(baselineFile)[9:-4] new_baseline = cls(featureClassName) new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls) with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader: csvReader = csv.reader(baselineReader) tests = six.next(csvReader)[1:] for case in tests: new_baseline.configuration[case] = {} new_baseline.baseline[case] = {} for testRow in csvReader: for case_idx, case in enumerate(tests, start=1): if 'general_info' in testRow[0]: new_baseline.configuration[case][testRow[0]] = testRow[case_idx] else: new_baseline.baseline[case][testRow[0]] = testRow[case_idx] new_baseline.tests = set(tests) return new_baseline def getTestConfig(self, test): if test not in self.configuration: return {} # This test is not present in the baseline for this class config = { 'TestCase': self.configuration[test].get('general_info_TestCase', None), 'Settings': 
ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')), } if 'general_info_ImageHash' in self.configuration[test]: config['ImageHash'] = self.configuration[test]['general_info_ImageHash'] if 'general_info_MaskHash' in self.configuration[test]: config['MaskHash'] = self.configuration[test]['general_info_MaskHash'] if config['TestCase'] is None: self.logger.error('Missing key "general_info_TestCase". Cannot configure!') return None return config def getTestFeatures(self, test): """ Gets all features for which a baseline value is available for the current class and test case. Returns a list containing the feature names. """ if test not in self.baseline: return None # This test is not present in the baseline for this class return list(self.baseline[test].keys()) def getBaselineValue(self, test, featureName): if test not in self.baseline: return None return self.baseline[test].get(featureName, None) def writeBaselineFile(self, baselineDir): baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls) testCases = list(self.baseline.keys()) with open(baselineFile, 'wb') as baseline: csvWriter = csv.writer(baseline) header = ['featureName'] + testCases csvWriter.writerow(header) config = self.configuration[testCases[0]].keys() for c in config: row = [c] for testCase in testCases: row.append(str(self.configuration[testCase].get(c, ''))) csvWriter.writerow(row) features = self.baseline[testCases[0]].keys() for f in features: row = [f] for testCase in testCases: row.append(str(self.baseline[testCase].get(f, ''))) csvWriter.writerow(row)
def getFeatureNames(self, className, test):
  """
  Gets all features for which a baseline value is available for the current class and test case. Returns a list
  containing the feature names (without image type and feature class specifiers, i.e. just the feature name).
  """
  if className not in self._baseline:
    return None  # No baseline available for specified class
  return self._baseline[className].getTestFeatures(test)
117
124
import ast import csv import logging import math import os from nose_parameterized import parameterized import numpy import SimpleITK as sitk import six from radiomics import getTestCase, imageoperations # Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func logger = logging.getLogger('radiomics.testing') TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2') def custom_name_func(testcase_func, param_num, param): """ A custom test name function that will ensure that the tests are run such that they're batched with all tests for a given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10 tests results in tests running in an order similar to: test_*.test_scenario_0_* test_*.test_scenario_10_* test_*.test_scenario_11_* ... test_*.test_scenario_19_* test_*.test_scenario_1_* test_*.test_scenario_20_* """ global logger logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num), testcase_func.__name__, param.args) return str("%s_%s" % ( testcase_func.__name__, parameterized.to_safe_name("_".join(str(x) for x in param.args)), )) class RadiomicsTestUtils: """ This utility class reads in and stores the baseline files stored in 'data\baseline' (one per feature class) It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated by the test. """ def __init__(self): self._logger = logging.getLogger('radiomics.testing.utils') self._logger.debug('RadiomicsTestUtils') # the image and mask volumes self._image = None self._mask = None self._current_image = None self._current_mask = None self._bb = None self._imageType = None # set up file paths self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data") self._baselineDir = os.path.join(self._dataDir, 'baseline') self._tests = set() self._test = None # Test, specifies an image and mask and some configuration (settings) self._testCase = None # Test image and mask to use in configured test self._testedSet = set() self._baseline = {} self.readBaselineFiles() self._current_config = {} self._featureClassName = None self._results = {} self._diffs = {} for test in self.getTests(): self._results[test] = {} self._diffs[test] = {} def readBaselineFiles(self): """ Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files. These files should therefore be named as follows: 'baseline_<className>.csv'. """ baselineFiles = [fileName for fileName in os.listdir(self._baselineDir) if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')] assert len(baselineFiles) > 0 for baselineFile in baselineFiles: newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile)) cls = newBaseline.cls self._logger.debug('Read baseline for class %s', cls) self._baseline[cls] = newBaseline self._tests |= newBaseline.tests def getTests(self): """ Return all the tests for which there are baseline information. """ return self._tests def getFeatureNames(self, className, test): """ Gets all features for which a baseline value is available for the current class and test case. 
Returns a list containing the feature names (without image type and feature class specifiers, i.e. just the feature name). """ if className not in self._baseline: return None # No baseline available for specified class return self._baseline[className].getTestFeatures(test) def setFeatureClassAndTestCase(self, className, test): """ Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case are not recognized. These have to be set here together, as the settings with which the test case has to be loaded are defined per feature class in the baseline (extracted from provenance information). Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test settings. If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature class or test case is changed, function returns True. """ global TEST_CASES if self._featureClassName == className and self._test == test: return False self._test = test self._testedSet.add(self._test) # First set featureClass if necessary, because if settings have changed, testCase needs te be reloaded if self._featureClassName != className: self._logger.debug('Setting feature class name to %s', className) assert className in self._baseline.keys() # Check if a baseline has been read for this class self._featureClassName = className # Check if test settings have changed if self._current_config != self._baseline[className].getTestConfig(test): self._current_config = self._baseline[className].getTestConfig(test) self._testCase = None # forces image to be reloaded (as settings have changed) # Next, set testCase if necessary if self._testCase != self._current_config['TestCase']: self._testCase = self._current_config['TestCase'] self._logger.info("Reading the image and mask for test case %s", self._testCase) assert self._current_config['TestCase'] in TEST_CASES imageName, maskName = getTestCase(self._testCase) assert imageName is not None assert maskName is not None self._image = sitk.ReadImage(imageName) self._mask = sitk.ReadImage(maskName) if 'ImageHash' in self._current_config: assert sitk.Hash(self._image) == self._current_config['ImageHash'] if 'MaskHash' in self._current_config: assert sitk.Hash(self._mask) == self._current_config['MaskHash'] settings = self._current_config.get('Settings', {}) interpolator = settings.get('interpolator', sitk.sitkBSpline) resampledPixelSpacing = settings.get('resampledPixelSpacing', None) if interpolator is not None and resampledPixelSpacing is not None: self._image, self._mask = imageoperations.resampleImage(self._image, self._mask, resampledPixelSpacing, interpolator, settings.get('label', 1), settings.get('padDistance', 5)) self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings) if correctedMask is not None: self._mask = correctedMask self._imageType = None return True def getImage(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_image def getMask(self, imageType): if self._imageType != imageType: self._applyFilter(imageType) return self._current_mask def _applyFilter(self, imageType): if imageType == 'original': self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb) else: raise NotImplementedError() self._imageType = imageType def getSettings(self): return self._current_config.get('Settings', {}) def checkResult(self, featureName, value): """ Use 
utility methods to get and test the results against the expected baseline value for this key. """ longName = '_'.join(featureName) if value is None: self._diffs[self._test][longName] = None self._results[self._test][longName] = None assert (value is not None) if math.isnan(value): self._diffs[self._test][longName] = numpy.nan self._results[self._test][longName] = numpy.nan assert (not math.isnan(value)) # save the result using the baseline class and feature names self._logger.debug('checkResults: featureName = %s', featureName) self._results[self._test][longName] = value baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName) assert baselineValue is not None baselineValue = float(baselineValue) self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue) if baselineValue == 0.0: # avoid divide by zero, the difference is either 0% if the value is also zero, or 100% if value - baselineValue == 0.0: percentDiff = 0.0 else: percentDiff = 1.0 else: percentDiff = abs(1.0 - (value / baselineValue)) # save the difference self._diffs[self._test][longName] = percentDiff # check for a less than three percent difference if (percentDiff >= 0.03): self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName, float(baselineValue), value, percentDiff * 100) assert (percentDiff < 0.03) def getResults(self): return self._results def getDiffs(self): return self._diffs def getDataDir(self): return self._dataDir def writeCSV(self, data, fileName): """ Write out data in a csv file. Assumes a data structure with: {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}} """ # Get the headers from the first testCase in _testedSet # If no tests were run, the length of _testedSet will be 0, and no files should be written if len(self._testedSet) > 0: with open(fileName, 'w') as csvFile: csvFileWriter = csv.writer(csvFile, lineterminator='\n') testedCases = sorted(self._testedSet) header = sorted(data[testedCases[0]].keys()) header = ['testCase'] + header csvFileWriter.writerow(header) for testCase in testedCases: thisCase = data[testCase] thisCase['testCase'] = testCase row = [] for h in header: row = row + [thisCase.get(h, "N/A")] csvFileWriter.writerow(row) self._logger.info('Wrote to file %s', fileName) else: self._logger.info('No test cases run, aborting file write to %s', fileName) class PyRadiomicsBaseline: def __init__(self, featureClassName): self.logger = logging.getLogger('radiomics.testing.baseline') self.cls = featureClassName self.configuration = {} self.baseline = {} self.tests = set() @classmethod def readBaselineFile(cls, baselineFile): featureClassName = os.path.basename(baselineFile)[9:-4] new_baseline = cls(featureClassName) new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls) with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader: csvReader = csv.reader(baselineReader) tests = six.next(csvReader)[1:] for case in tests: new_baseline.configuration[case] = {} new_baseline.baseline[case] = {} for testRow in csvReader: for case_idx, case in enumerate(tests, start=1): if 'general_info' in testRow[0]: new_baseline.configuration[case][testRow[0]] = testRow[case_idx] else: new_baseline.baseline[case][testRow[0]] = testRow[case_idx] new_baseline.tests = set(tests) return new_baseline def getTestConfig(self, test): if test not in self.configuration: return {} # This test is not present in the baseline for this class config = { 'TestCase': 
self.configuration[test].get('general_info_TestCase', None), 'Settings': ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')), } if 'general_info_ImageHash' in self.configuration[test]: config['ImageHash'] = self.configuration[test]['general_info_ImageHash'] if 'general_info_MaskHash' in self.configuration[test]: config['MaskHash'] = self.configuration[test]['general_info_MaskHash'] if config['TestCase'] is None: self.logger.error('Missing key "general_info_TestCase". Cannot configure!') return None return config def getTestFeatures(self, test): """ Gets all features for which a baseline value is available for the current class and test case. Returns a list containing the feature names. """ if test not in self.baseline: return None # This test is not present in the baseline for this class return list(self.baseline[test].keys()) def getBaselineValue(self, test, featureName): if test not in self.baseline: return None return self.baseline[test].get(featureName, None) def writeBaselineFile(self, baselineDir): baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls) testCases = list(self.baseline.keys()) with open(baselineFile, 'wb') as baseline: csvWriter = csv.writer(baseline) header = ['featureName'] + testCases csvWriter.writerow(header) config = self.configuration[testCases[0]].keys() for c in config: row = [c] for testCase in testCases: row.append(str(self.configuration[testCase].get(c, ''))) csvWriter.writerow(row) features = self.baseline[testCases[0]].keys() for f in features: row = [f] for testCase in testCases: row.append(str(self.baseline[testCase].get(f, ''))) csvWriter.writerow(row)
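The 3% tolerance applied in checkResult above, including its zero-baseline special case, reduces to a small relative-difference helper. A self-contained sketch of that logic:

# Relative difference used by checkResult: 0% when both values are zero,
# 100% when the baseline is zero but the computed value is not,
# |1 - value/baseline| otherwise.
def percent_diff(value, baseline):
    if baseline == 0.0:
        return 0.0 if value == 0.0 else 1.0
    return abs(1.0 - value / baseline)

assert percent_diff(1.02, 1.0) < 0.03   # passes the 3% tolerance
assert percent_diff(0.0, 0.0) == 0.0    # zero baseline, zero value
assert percent_diff(0.5, 0.0) == 1.0    # zero baseline, nonzero value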
gradients
Take gradient of output node with respect to each node in node_list.

Parameters
----------
output_node: output node that we are taking derivative of.
node_list: list of nodes that we are taking derivative wrt.

Returns
-------
A list of gradient values, one for each node in node_list respectively.
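Before the full implementation below, a self-contained toy (plain floats, nothing from this module) of the adjoint accumulation the docstring describes: when a node feeds several consumers, its gradient is the sum of the partial adjoints contributed by each use.

# Toy adjoint accumulation for y = x * x: x occurs twice as an input to the
# multiply, so two partial adjoints (each equal to the other factor, x) are
# collected and summed, mirroring node_to_output_grads_list below.
def toy_grad_square(x):
    partial_adjoints = [x, x]     # one partial per occurrence of x
    return sum(partial_adjoints)  # summed adjoint: d(x*x)/dx = 2x

print(toy_grad_square(3.0))  # 6.0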
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] # MASKED: gradients function (lines 446-488) def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. 
broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
def gradients(output_node, node_list, scheduler_policy=None):
    """Take gradient of output node with respect to each node in node_list.

    Parameters
    ----------
    output_node: output node that we are taking derivative of.
    node_list: list of nodes that we are taking derivative wrt.

    Returns
    -------
    A list of gradient values, one for each node in node_list respectively.
    """
    from . import OnesLike
    node_to_output_grads_list = {}
    node_to_output_grads_list[output_node] = [OnesLike.oneslike_op(output_node)]
    node_to_output_grad = {}
    # Traverse forward graph in reverse topological order
    reverse_topo_order = reversed(find_topo_sort([output_node]))
    for node in reverse_topo_order:
        output_grad = sum_node_list(node_to_output_grads_list[node])
        node_to_output_grad[node] = output_grad
        input_grads_list = node.op.gradient(node, output_grad)
        for i in range(len(node.inputs)):
            if node.inputs[i] not in node_to_output_grads_list:
                node_to_output_grads_list[node.inputs[i]] = []
            # Calculate partial adjoint for input nodes.
            node_to_output_grads_list[node.inputs[i]].append(input_grads_list[i])
    if scheduler_policy == 'swap':
        for node in node_list:
            if node.swap:
                node_to_output_grad[node].swap = True
    grad_node_list = [node_to_output_grad[node] for node in node_list]
    # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list]
    return grad_node_list
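A note on sum_node_list, which the implementation above relies on: Python's built-in sum starts its fold from 0, so sum(nodes) would evaluate 0 + node and either fail or create a redundant constant node, whereas reduce(add, nodes) folds the nodes directly. A quick self-contained demonstration with a tracing stand-in:

from functools import reduce
from operator import add

# Stand-in that records how additions are folded.
class Tracing(object):
    def __init__(self, name):
        self.name = name
    def __add__(self, other):
        return Tracing('(%s+%s)' % (self.name, other.name))

nodes = [Tracing('a'), Tracing('b'), Tracing('c')]
print(reduce(add, nodes).name)  # ((a+b)+c), no spurious leading term
# sum(nodes) would raise TypeError: int + Tracing is undefined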
446
488
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
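The topological ordering that both gradient builders depend on is a plain post-order DFS. A self-contained sketch with a minimal stand-in node (only an `inputs` list is assumed):

# Minimal stand-in graph: c depends on a and b, b depends on a.
class ToyNode(object):
    def __init__(self, name, inputs=()):
        self.name = name
        self.inputs = list(inputs)

def toy_topo_sort(node_list):
    visited, order = set(), []
    def dfs(node):
        if node in visited:
            return
        visited.add(node)
        for n in node.inputs:
            dfs(n)
        order.append(node)  # appended only after all predecessors
    for node in node_list:
        dfs(node)
    return order

a = ToyNode('a'); b = ToyNode('b', [a]); c = ToyNode('c', [a, b])
print([n.name for n in toy_topo_sort([c])])  # ['a', 'b', 'c']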
distributed_gradients
Take gradient of output node with respect to each node in node_list.

Parameters
----------
output_node: output node that we are taking derivative of.
node_list: list of nodes that we are taking derivative wrt.

Returns
-------
A list of gradient values, one for each node in node_list respectively.
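The only difference from gradients() is that each gradient node is wrapped in distributed_communicate_op, which hands the tensor to lib_communication.so for an allreduce-style exchange across workers. A toy numpy stand-in for that reduction follows; averaging across workers is an assumption, since the actual semantics live in the shared library, which is not shown here.

import numpy as np

# Toy stand-in for the cross-worker reduction behind Distributed_CommunicateOp.
# Averaging is an assumed semantic; the real reduction is implemented in
# lib_communication.so.
def mock_allreduce(local_grads):
    return sum(local_grads) / float(len(local_grads))

g_worker0 = np.array([1.0, 2.0])
g_worker1 = np.array([3.0, 4.0])
print(mock_allreduce([g_worker0, g_worker1]))  # [2. 3.]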
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list # MASKED: distributed_gradients function (lines 491-531) ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. 
broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
def distributed_gradients(output_node, node_list, scheduler_policy=None):
    """Take gradient of output node with respect to each node in node_list.

    Parameters
    ----------
    output_node: output node that we are taking derivative of.
    node_list: list of nodes that we are taking derivative wrt.

    Returns
    -------
    A list of gradient values, one for each node in node_list respectively.

    """
    from .OnesLike import oneslike_op
    node_to_output_grads_list = {}
    node_to_output_grads_list[output_node] = [oneslike_op(output_node)]
    node_to_output_grad = {}
    # Traverse forward graph in reverse topological order
    reverse_topo_order = reversed(find_topo_sort([output_node]))
    for node in reverse_topo_order:
        output_grad = sum_node_list(node_to_output_grads_list[node])
        node_to_output_grad[node] = output_grad
        input_grads_list = node.op.gradient(node, output_grad)
        for i in range(len(node.inputs)):
            if node.inputs[i] not in node_to_output_grads_list:
                node_to_output_grads_list[node.inputs[i]] = []
            # Calculate partial adjoint for input nodes.
            node_to_output_grads_list[node.inputs[i]].append(
                input_grads_list[i])

    if scheduler_policy == 'swap':
        for node in node_list:
            if node.swap:
                node_to_output_grad[node].swap = True
    # Unlike gradients(), wrap each adjoint in the communicate op so that
    # evaluating a gradient node all-reduces its value across workers.
    grad_node_list = [distributed_communicate_op(
        node_to_output_grad[node]) for node in node_list]
    return grad_node_list
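The only functional difference from gradients above is the final statement: instead of returning each adjoint directly, distributed_gradients wraps it in distributed_communicate_op, so evaluating a gradient node triggers an all-reduce across workers. Below is a runnable toy illustration of just that wrapping step; FakeNode and fake_communicate_op are stand-ins invented here, not library code:

class FakeNode:
    # Stand-in node carrying only a name, for illustration.
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name

def fake_communicate_op(node):
    # Mimics distributed_communicate_op: wraps a gradient node in a new node
    # whose compute() would all-reduce the value before returning it.
    return FakeNode("Distributed_Communicate(%s)" % node.name)

grads = [FakeNode("W_grad"), FakeNode("b_grad")]
wrapped = [fake_communicate_op(g) for g in grads]
print(wrapped)  # [Distributed_Communicate(W_grad), Distributed_Communicate(b_grad)]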
491
531
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
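One easy-to-miss detail in StreamExecutor.run in the file above: are_feed_shapes_equal compares the previous and current feed shapes via the symmetric difference of the dicts' item sets, so shape inference (and, on GPU, memory planning) reruns whenever any placeholder's shape changes between calls, e.g. when switching from a training batch to a differently sized test batch. A self-contained illustration, using strings in place of node objects:

def are_feed_shapes_equal(sa, sb):
    # Mirrors the helper nested inside run(): two shape dicts are equal iff
    # the symmetric difference of their (key, value) item sets is empty.
    if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):
        return False
    return len(set(sa.items()) ^ set(sb.items())) == 0

print(are_feed_shapes_equal({"x": (32, 10)}, {"x": (32, 10)}))  # True: no re-planning
print(are_feed_shapes_equal({"x": (32, 10)}, {"x": (16, 10)}))  # False: shapes re-inferred
print(are_feed_shapes_equal({"x": (32, 10)}, None))             # False: first run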
find_topo_sort
Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort.
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## # MASKED: find_topo_sort function (lines 538-551) def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
def find_topo_sort(node_list):
    """Given a list of nodes, return a topo ordering of nodes ending in them.

    A simple algorithm is to do a post-order DFS traversal on the given nodes,
    going backwards based on input edges. Since a node is added to the ordering
    after all its predecessors are traversed due to post-order DFS, we get a
    topological sort.
    """
    visited = set()
    topo_order = []
    for node in node_list:
        topo_sort_dfs(node, visited, topo_order)
    return topo_order
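A quick self-contained check of the ordering find_topo_sort produces. N is a stand-in node class invented here, carrying only the .inputs attribute the traversal reads; the two functions are copied from the implementation above:

class N:
    def __init__(self, name, inputs=()):
        self.name = name
        self.inputs = list(inputs)

    def __repr__(self):
        return self.name

def find_topo_sort(node_list):
    visited, topo_order = set(), []
    for node in node_list:
        topo_sort_dfs(node, visited, topo_order)
    return topo_order

def topo_sort_dfs(node, visited, topo_order):
    # Post-order DFS: a node is appended only after all of its inputs.
    if node in visited:
        return
    visited.add(node)
    for n in node.inputs:
        topo_sort_dfs(n, visited, topo_order)
    topo_order.append(node)

a = N("a"); b = N("b")
c = N("c", [a, b]); d = N("d", [c, b])
print(find_topo_sort([d]))  # [a, b, c, d] -- every node appears after its inputs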
538
551
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
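For context on the record above, here is a minimal, self-contained sketch of the post-order DFS that find_topo_sort performs and of why gradients() walks the result in reverse; the tiny Node class is a hypothetical stand-in for the library's Node/Op machinery, not its actual API.

# Minimal sketch of the topological ordering used by gradients() above.
# Node here is a simplified stand-in: it only tracks a name and its inputs.
class Node:
    def __init__(self, name, inputs=()):
        self.name = name
        self.inputs = list(inputs)

def find_topo_sort(node_list):
    visited, order = set(), []
    def dfs(node):
        if node in visited:
            return
        visited.add(node)
        for n in node.inputs:
            dfs(n)
        order.append(node)  # appended only after all inputs are visited
    for node in node_list:
        dfs(node)
    return order

x = Node("x")
y = Node("y", [x])
z = Node("z", [x, y])

# Post-order DFS guarantees every node appears after all of its inputs,
# so reversing the order lets adjoints flow from the output back to inputs.
print([n.name for n in find_topo_sort([z])])             # ['x', 'y', 'z']
print([n.name for n in reversed(find_topo_sort([z]))])   # ['z', 'y', 'x']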
broadcast_rule
Return output shape of broadcast shape_a, shape_b.
e.g. broadcast_rule((3,2), (4,3,2))
returns output_shape = (4,3,2)

Check out explanations and more examples at
https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html
http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) # MASKED: broadcast_rule function (lines 571-597)
def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
571
597
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
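The broadcast_rule implementation in this record can be sanity-checked against NumPy's own broadcasting behaviour; the snippet below assumes broadcast_rule from the implementation field above is already in scope.

# Quick cross-check of broadcast_rule against NumPy broadcasting.
import numpy as np

assert broadcast_rule((3, 2), (4, 3, 2)) == (4, 3, 2)   # the docstring's own example
assert broadcast_rule((1, 5), (4, 1)) == (4, 5)         # size-1 dims stretch on both sides

# NumPy reports the same result when it actually broadcasts two arrays:
assert np.broadcast(np.empty((3, 2)), np.empty((4, 3, 2))).shape == (4, 3, 2)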
infer_shape
Given shapes of feed_dict nodes, infer shape for all nodes in graph.

Implementation note:
Iteratively calls node.op.infer_shape to infer shapes.
Node shapes stored in self.node_to_shape_map.

Parameters
----------
feed_shapes: node->shapes mapping for feed_dict nodes.
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] # MASKED: infer_shape function (lines 97-118) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. # convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. 
# use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes)
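The shape-propagation loop in infer_shape can be exercised in isolation with lightweight stand-ins; a self-contained sketch, where ToyOp and ToyNode are invented here and are not this framework's Op/Node classes:

class ToyOp:
    # Stand-in op: output shape is the elementwise max of the input shapes,
    # i.e. a crude broadcast of same-rank inputs.
    def infer_shape(self, node, input_shapes):
        return tuple(max(dims) for dims in zip(*input_shapes))

class ToyNode:
    def __init__(self, inputs=(), op=None):
        self.inputs = list(inputs)
        self.op = op

a, b = ToyNode(), ToyNode()
c = ToyNode(inputs=[a, b], op=ToyOp())
feed_shapes = {a: (3, 1), b: (1, 4)}
node_to_shape_map = {}
for node in [a, b, c]:  # already in topological order
    if node in feed_shapes:
        node_to_shape_map[node] = feed_shapes[node]
    else:
        input_shapes = [node_to_shape_map[n] for n in node.inputs]
        node_to_shape_map[node] = node.op.infer_shape(node, input_shapes)
assert node_to_shape_map[c] == (3, 4)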
97
118
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
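The bookkeeping in gradients(), which seeds the output with a ones-like adjoint and then accumulates partial adjoints per input in reverse topological order, can be replayed with plain scalars. A minimal sketch for y = x1 * x2, using hand-rolled dictionaries rather than this module's node classes:

node_to_output_grads_list = {'y': [1.0]}          # dy/dy = 1, the oneslike seed
node_to_output_grad = {}
graph = {'y': ['x1', 'x2'], 'x1': [], 'x2': []}   # node -> inputs
values = {'x1': 3.0, 'x2': 4.0}

def local_grads(node, out_grad):
    # Product rule: d(x1*x2)/dx1 = x2 and d(x1*x2)/dx2 = x1.
    if node == 'y':
        return [out_grad * values['x2'], out_grad * values['x1']]
    return []

for node in ['y', 'x1', 'x2']:                    # reverse topological order
    output_grad = sum(node_to_output_grads_list[node])
    node_to_output_grad[node] = output_grad
    for inp, g in zip(graph[node], local_grads(node, output_grad)):
        node_to_output_grads_list.setdefault(inp, []).append(g)

assert node_to_output_grad['x1'] == 4.0 and node_to_output_grad['x2'] == 3.0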
memory_plan
Allocates ndarray.NDArray for every node except feed_dict nodes.

Implementation note:
Option 1: Alloc an ndarray.NDArray per node that persists across run().
Option 2: Implement a memory pool to reuse memory for nodes of the same shape. For more details, see Lecture 7.
For both options, self.node_to_arr_map stores the node->NDArray mapping so the mapping persists across multiple executor.run() calls.

Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray.

Parameters
----------
feed_shapes: node->shapes mapping for feed_dict nodes.
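Option 2 can be prototyped as a pool keyed by shape; a toy sketch with NumPy buffers standing in for device NDArrays (ShapePool is illustrative only: the executor would allocate with ndarray.empty(shape, ctx=self.ctx)):

import numpy as np
from collections import defaultdict

class ShapePool:
    # Toy memory pool: recycle freed buffers of identical shape.
    def __init__(self):
        self._free = defaultdict(list)    # shape -> list of free buffers
    def alloc(self, shape):
        bucket = self._free[tuple(shape)]
        return bucket.pop() if bucket else np.empty(shape)
    def release(self, arr):
        self._free[arr.shape].append(arr)

pool = ShapePool()
a = pool.alloc((128, 64))
pool.release(a)
b = pool.alloc((128, 64))
assert a is b                             # buffer reused, not reallocated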
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) # MASKED: memory_plan function (lines 120-149) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. # convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. 
# use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx)
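The policy branch in memory_plan can be mimicked with host arrays to see which nodes get preallocated. A toy sketch, where plan and swap_flags are hypothetical names invented here (in the real executor the swap state lives on node.swap, and device allocation uses ndarray.empty with a ctx):

import numpy as np

def plan(shape_map, policy, swap_flags):
    # Toy rendition of memory_plan's policy branch: nodes flagged for swapping
    # are skipped here and would be allocated lazily at compute time.
    arr_map = {}
    for node, shape in shape_map.items():
        if policy == 'swap' and swap_flags.get(node, False):
            continue
        arr_map[node] = np.empty(shape)   # real code: ndarray.empty(shape, ctx)
    return arr_map

arrs = plan({'w': (2, 2), 'g': (2, 2)}, 'swap', {'g': True})
assert 'g' not in arrs and arrs['w'].shape == (2, 2)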
120
149
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
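find_topo_sort only requires each node to expose an .inputs list, so it can be tried out with lightweight stand-ins; a sketch assuming find_topo_sort is importable from this module (N is a minimal stand-in class invented here):

class N:
    def __init__(self, inputs=()):
        self.inputs = list(inputs)

x, y = N(), N()
z = N([x, y])

order = find_topo_sort([z])               # post-order DFS from z
assert order.index(x) < order.index(z)
assert order.index(y) < order.index(z)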
run
Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray.
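For context, a hedged sketch of how this `run` method is typically driven; the graph nodes (`x`, `w`, `loss`, `grads`) and the `ndarray.gpu(0)` context constructor are illustrative assumptions, while the `StreamExecutor` and `run` signatures come from the code below:

```python
# Hypothetical training-step driver; only the executor API is from this file.
executor = StreamExecutor([loss] + grads, ctx=ndarray.gpu(0))
results = executor.run({x: x_val, w: w_val},  # node -> np.ndarray feed
                       convert_to_numpy_ret_vals=True)
loss_val, grad_vals = results[0], results[1:]
```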
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) # MASKED: run function (lines 151-214) # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. # convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. 
# global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. # if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. 
# continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. # if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
# print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. 
broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
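The adjoint bookkeeping in `gradients` above can be illustrated without the `Node` machinery. A toy, self-contained sketch of the same reverse-accumulation pattern, with plain floats standing in for gradient nodes and `sum()` playing the role of `sum_node_list`:

```python
# y = x1 * x2 at (x1, x2) = (3, 4); seed the output adjoint with a ones-like.
x1, x2 = 3.0, 4.0
adjoints = {"y": [1.0]}
out_grad = sum(adjoints["y"])                        # merge partial adjoints
adjoints.setdefault("x1", []).append(out_grad * x2)  # dy/dx1 = x2
adjoints.setdefault("x2", []).append(out_grad * x1)  # dy/dx2 = x1
assert (sum(adjoints["x1"]), sum(adjoints["x2"])) == (4.0, 3.0)
```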
def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after training feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list]
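One design point worth noting in this implementation: `run` caches `feed_shapes` and only re-runs shape inference (and, on GPU, memory planning) when the fed shapes actually change, detected via a symmetric difference over the dict items. A standalone illustration of that check, with string keys standing in for graph nodes:

```python
sa = {"x": (32, 784)}   # shapes cached from the previous run()
sb = {"x": (64, 784)}   # shapes fed this run()
changed = len(set(sa.items()) ^ set(sb.items())) != 0
assert changed          # batch size changed -> shapes are re-inferred
```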
151
214
""" library to take autodiff and execute a computation graph """ from __future__ import absolute_import import numpy as np from .Node import Op from .. import ndarray from ..stream import * import ctypes import os from pynvml import * FLAG_SHOW_GRAPH = False G_NODE_ID = 0 NAME_RULE = 1 def communicate_init(worker_num, worker_id, source_ip, target_ip): global lib_communicate # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) lib_path = os.path.join(curr_path, '../../build/lib/') path_to_so_file = os.path.join(lib_path, "lib_communication.so") lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file) lib_communicate.DL_Connect_Init( worker_num, worker_id, source_ip, target_ip) def communicate_finish(): lib_communicate.DL_Communicate_Close() class Distributed_CommunicateOp(Op): def __call__(self, nodeA): new_node = Op.__call__(self) new_node.inputs = [nodeA] new_node.name = "Distributed_Communicate(%s)" % (nodeA.name) # print nodeA.name return new_node def compute(self, node, input_vals, output_val, use_numpy=True): after_reduce_gradient_cpu = ndarray.empty( shape=output_val.shape, ctx=ndarray.cpu(0)) if use_numpy: gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0)) else: gradient_val_cpu = ndarray.array( input_vals[0].asnumpy(), ctx=ndarray.cpu(0)) # print gradient_val_cpu.asnumpy() lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle) lib_communicate.DL_Communicate( gradient_val_cpu.handle, after_reduce_gradient_cpu.handle) # print after_reduce_gradient_cpu.asnumpy() if use_numpy: output_val[:] = after_reduce_gradient_cpu.asnumpy() else: after_reduce_gradient_cpu.copyto(output_val) def gradient(self, node, output_grad): raise NotImplementedError def infer_shape(self, node, input_shapes): return input_shapes[0] distributed_communicate_op = Distributed_CommunicateOp() class StreamExecutor(object): """Executor computes values for given set of nodes in computation graph.""" def __init__(self, eval_node_list, ctx = None, stream = None, policy = None): """ Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...) """ self.eval_node_list = eval_node_list self.ctx = ctx if stream is None: self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if self.policy == 'swap': self.swap_queue = [] def infer_shape(self, feed_shapes): """Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. 
""" """TODO: Your code here""" self.node_to_shape_map = {} for node in self.topo_order: if node in feed_shapes: self.node_to_shape_map[node] = feed_shapes[node] else: # print(node.name) input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape( node, input_shapes) def memory_plan(self, feed_shapes): """Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. More details see Lecture 7. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes. """ """TODO: Your code here""" assert (self.ctx is not None) # self.infer_shape(feed_shapes) self.node_to_arr_map = {} for node, shape in self.node_to_shape_map.items(): if self.policy == 'swap': if not node.swap: self.node_to_arr_map[node] = ndarray.empty( shape, ctx=self.ctx) elif self.policy == 'vdnn': self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 # Assume self.ctx is None implies numpy array and numpy ops. use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if use_numpy: # all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: # convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" # print"xxxx" # collect shapes for all placeholders # infer shape if feed_shapes changed since last run # e.g. call run() on test data after trainng # print feed_shapes feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if node in node_to_val_map: continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] # print(node.name) node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if not use_numpy and convert_to_numpy_ret_vals: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] # def run(self, feed_dict, convert_to_numpy_ret_vals=False): # """ # Parameters # ---------- # feed_dict: a dictionary of node->np.ndarray supplied by user. 
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array # Returns # ------- # A list of values for nodes in eval_node_list. NDArray or np.ndarray. # """ # def are_feed_shapes_equal(sa, sb): # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): # return False # unmatched_item = set(sa.items()) ^ set(sb.items()) # return len(unmatched_item) == 0 # # Assume self.ctx is None implies numpy array and numpy ops. # use_numpy = self.ctx is None # node_to_val_map = {} # for node, value in feed_dict.items(): # if self.policy == 'vdnn': # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # if use_numpy: # # all values passed in feed_dict must be np.ndarray # assert isinstance(value, np.ndarray) # node_to_val_map[node] = value # else: # # convert values to ndarray.NDArray if necessary # if isinstance(value, np.ndarray): # if self.policy == 'swap': # if node.swap == True: # node_to_val_map[node] = value # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # else: # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) # elif isinstance(value, ndarray.NDArray): # node_to_val_map[node] = value # else: # assert False, "feed_dict value type not supported" # # collect shapes for all placeholders # feed_shapes = {} # for node in node_to_val_map: # feed_shapes[node] = node_to_val_map[node].shape # # infer shape if feed_shapes changed since last run # # e.g. call run() on test data after trainng # # print feed_shapes # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): # self.infer_shape(feed_shapes) # self.feed_shapes = feed_shapes # if not self.policy == 'vdnn': # # plan memory if using GPU # if (not use_numpy): # self.memory_plan(feed_shapes) # # Traverse graph in topo order and compute values for all nodes. # global FLAG_SHOW_GRAPH # if self.policy == 'swap': # # generate swap queue # if not use_numpy: # for node in self.topo_order: # if node not in node_to_val_map: # # variable in placeholder # for input_node in node.inputs: # if input_node.swap == True: # self.swap_queue.append(input_node) # # variable grad # if node.swap == True: # self.swap_queue.append(node) # node_in_GPU = None # if FLAG_SHOW_GRAPH: # print "Show swap queue:" # for node in self.swap_queue: # print node # elif self.policy == 'vdnn': # # TODO traverse graph to select in-gpu window # window = [0,0] # if not use_numpy: # nvmlInit() # handle = nvmlDeviceGetHandleByIndex(0) # info = nvmlDeviceGetMemoryInfo(handle) # gpu_mem = info.free # nvmlShutdown() # loss_node = self.eval_node_list[0] # window[1] = self.topo_order.index(loss_node)+1 # window[0] = self.topo_order.index(loss_node)+1 # for node in reversed(self.topo_order[:window[1]+1]): # node_size = 4 # float32 # #print node, self.node_to_shape_map[node] # for shape in self.node_to_shape_map[node]: # node_size = node_size * shape # if gpu_mem > node_size: # gpu_mem = gpu_mem - node_size # window[0] = window[0] - 1 # #print "gpu_mem:",gpu_mem # # Traverse graph in topo order and compute values for all nodes. 
# if FLAG_SHOW_GRAPH: # print "run topo_order" # # Show graph dependency # if FLAG_SHOW_GRAPH: # print "node:",node # print "node.desc:",node.desc # for node in self.topo_order: # if self.policy == 'vdnn': # # Skip placeholder nodes # if node in node_to_val_map: # continue # # H2D before compute # ## Collect inputs # input_vals = [] # for n in node.inputs: # if not use_numpy: # if isinstance(node_to_val_map[n], np.ndarray): # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) # input_vals.append(node_to_val_map[n]) # ## Alloc node space # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) # # Compute # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # # D2H after compute # if use_numpy: # node_to_val_map[node] = node_val # else: # node_index = self.topo_order.index(node) # if node_index > window[0] and node_index < window[1]: # node_to_val_map[node] = node_val # continue # node_to_val_map[node] = node_val.asnumpy() # del node_val # for n in node.inputs: # if isinstance(node_to_val_map[n], ndarray.NDArray): # tmp_val = node_to_val_map[n].asnumpy() # del node_to_val_map[n] # node_to_val_map[n] = tmp_val # elif self.policy == 'swap': # # Switch in GPU # if not use_numpy: # if self.swap_queue and (node_in_GPU==None): # swap_node = self.swap_queue[0] # if swap_node in node_to_val_map: # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) # else: # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) # node_in_GPU = swap_node.id # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # # Compute # input_vals = [node_to_val_map[n] for n in node.inputs] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # node.op.compute(node, input_vals, node_val, use_numpy) # if node.swap == True: # node_to_val_map[node] = node_val.asnumpy() # del node_val # del self.node_to_arr_map[node] # del self.swap_queue[0] # node_in_GPU = None # else: # node_to_val_map[node] = node_val # # Switch out GPU # if not use_numpy: # if self.swap_queue: # if self.swap_queue[0] in node.inputs: # out_node = self.swap_queue.pop(0) # if self.swap_queue: # if not self.swap_queue[0].id == node_in_GPU: # tmp_array = node_to_val_map[out_node].asnumpy() # del node_to_val_map[out_node] # node_to_val_map[out_node] = tmp_array # node_in_GPU = None # else: # if node in node_to_val_map: # # Skip placeholder nodes. Values already provided by feed_dict. # continue # input_vals = [node_to_val_map[n] for n in node.inputs] # # print self.node_to_shape_map[node] # if use_numpy: # node_val = np.empty(shape=self.node_to_shape_map[node]) # else: # node_val = self.node_to_arr_map[node] # # node_val is modified in-place whether np.ndarray or NDArray # # if (len(node.inputs) == 1): # # print "computs",node.inputs[0].name # # else: # # print "computs",node.inputs[0].name,node.inputs[1].name # # print node.name # # print node_val.shape # # print "xxx" # # print node.name # node.op.compute(node, input_vals, node_val, use_numpy) # # print "xxx" # node_to_val_map[node] = node_val # # print "xxx" # if FLAG_SHOW_GRAPH: # FLAG_SHOW_GRAPH = False # # Collect node values. 
# if not use_numpy and convert_to_numpy_ret_vals: # if self.policy == 'swap': # node_values = [] # for n in self.eval_node_list: # if n.swap == True: # node_values.append(node_to_val_map[n]) # else: # node_values.append(node_to_val_map[n].asnumpy()) # return node_values # elif self.policy == 'vdnn': # return [node_to_val_map[n] for n in self.eval_node_list] # else: # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] # return [node_to_val_map[n] for n in self.eval_node_list] def gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [ OnesLike.oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. # print node.name node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list def distributed_gradients(output_node, node_list, scheduler_policy=None): """Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively. """ from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} # Traverse forward graph in reverse topological order reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) #print len(node.name) #print len(node.inputs) #raw_input("\n\nPress the enter key to exit.") for i in range(len(node.inputs)): if node.inputs[i] not in node_to_output_grads_list: node_to_output_grads_list[node.inputs[i]] = [] # Calculate partial adjoint for input nodes. 
node_to_output_grads_list[node.inputs[i]].append( input_grads_list[i]) if scheduler_policy == 'swap': for node in node_list: if node.swap: node_to_output_grad[node].swap = True # grad_node_list = [node_to_output_grad[node] for node in node_list] grad_node_list = [distributed_communicate_op( node_to_output_grad[node]) for node in node_list] return grad_node_list ################## # Helper Methods # ################## def find_topo_sort(node_list): """Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort. """ visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order def topo_sort_dfs(node, visited, topo_order): """Post-order DFS""" if node in visited: return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node) def sum_node_list(node_list): """Custom sum func to avoid creating redundant nodes in Python sum func.""" from operator import add from functools import reduce return reduce(add, node_list) def broadcast_rule(shape_a, shape_b): """Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/ """ assert(isinstance(shape_a, tuple)) assert(isinstance(shape_b, tuple)) if len(shape_a) > len(shape_b): longer_shape, shorter_shape = shape_a, shape_b else: longer_shape, shorter_shape = shape_b, shape_a len_diff = len(longer_shape) - len(shorter_shape) for i in range(len_diff): # pad with leading 1s shorter_shape = (1,) + shorter_shape assert len(shorter_shape) == len(longer_shape) output_shape = list(longer_shape) for i in range(len(output_shape)): assert (shorter_shape[i] == longer_shape[i]) \ or (shorter_shape[i] == 1) \ or (longer_shape[i] == 1) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
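The `Distributed_CommunicateOp` in the file above stages each gradient through host memory before the exchange, and `distributed_gradients` wraps every output gradient in that op. A hedged sketch of the intended wiring, where `loss`, `w1`, `w2` are placeholder graph nodes and the addresses mirror the commented examples in `communicate_init`:

```python
communicate_init(2, 0, "*:4001", "localhost:4002")  # worker 0 of 2
grads = distributed_gradients(loss, [w1, w2])       # each grad is wrapped in
                                                    # distributed_communicate_op
# ... build an executor over [loss] + grads and run training steps ...
communicate_finish()
```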
_callback
This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call.
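To make that contract concrete: each subscription value is an `(args, kwargs)` pair whose kwargs carry the `_callback`/`_obj` bookkeeping that `_callback` pops before calling and restores afterwards. An illustrative entry, with hypothetical values:

```python
def update_func(*args, **kwargs):  # hypothetical module-level hook
    pass

# store_key: (packed_obj, methodname, path, interval, idstring, persistent)
subscriptions = {
    (None, None, "world.weather.update", 15, "", True):
        ((), {"_callback": update_func, "_obj": None}),
}
```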
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ # MASKED: _callback function (lines 88-139) def __init__(self, interval): """ Set up the ticker Args: interval (int): The stepping interval. """ self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] # set up a twisted asynchronous repeat call self.task = ExtendedLoopingCall(self._callback) def validate(self, start_delay=None): """ Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to way before starting. 
""" subs = self.subscriptions if self.task.running: if not subs: self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} def add(self, store_key, *args, **kwargs): """ Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method. """ _, _, _, interval, _, _ = store_key if not interval: log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if interval not in self.tickers: self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs) def remove(self, store_key): """ Remove subscription from pool. Args: store_key (str): Unique storage hash to remove """ _, _, _, interval, _, _ = store_key if interval in self.tickers: self.tickers[interval].remove(store_key) if not self.tickers[interval]: del self.tickers[interval] def stop(self, interval=None): """ Stop all scripts in pool. This is done at server reload since restoring the pool will automatically re-populate the pool. Args: interval (int, optional): Only stop tickers with this interval. """ if interval and interval in self.tickers: self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop() class TickerHandler(object): """ The Tickerhandler maintains a pool of tasks for subscribing objects to various tick rates. The pool maintains creation instructions and and re-applies them at a server restart. """ ticker_pool_class = TickerPool def __init__(self, save_name="ticker_storage"): """ Initialize handler save_name (str, optional): The name of the ServerConfig instance to store the handler state persistently. """ self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class() def _get_callback(self, callback): """ Analyze callback and determine its consituents Args: callback (function or method): This is either a stand-alone function or class method on a typeclassed entitye (that is, an entity that can be saved to the database). 
Returns: ret (tuple): This is a tuple of the form `(obj, path, callfunc)`, where `obj` is the database object the callback is defined on if it's a method (otherwise `None`) and vice-versa, `path` is the python-path to the stand-alone function (`None` if a method). The `callfunc` is either the name of the method to call or the callable function object itself. """ outobj, outpath, outcallfunc = None, None, None if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = "%s.%s" % (callback.__module__, callback.func_name) outcallfunc = callback else: raise TypeError("%s is not a callable function or method." % callback) return outobj, outpath, outcallfunc def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True): """ Tries to create a store_key for the object. Args: obj (Object, tuple or None): Subscribing object if any. If a tuple, this is a packed_obj tuple from dbserialize. path (str or None): Python-path to callable, if any. interval (int): Ticker interval. callfunc (callable or str): This is either the callable function or the name of the method to call. Note that the callable is never stored in the key; that is uniquely identified with the python-path. idstring (str, optional): Additional separator between different subscription types. persistent (bool, optional): If this ticker should survive a system shutdown or not. Returns: store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval, idstring, persistent)` that uniquely identifies the ticker. Here, `packed_obj` is the unique string representation of the object or `None`. The `methodname` is the string name of the method on `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is the Python-path to a non-method callable, or `None`. Finally, `interval` `idstring` and `persistent` are integers, strings and bools respectively. """ interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. 
""" if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if obj and callfunc: kwargs["_callback"] = callfunc kwargs["_obj"] = obj elif path: modname, varname = path.rsplit(".", 1) callback = variable_from_module(modname, varname) kwargs["_callback"] = callback kwargs["_obj"] = None else: # Neither object nor path - discard this ticker log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue except Exception: # this suggests a malformed save or missing objects log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue # if we get here we should create a new ticker self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs): """ Add subscription to tickerhandler Args: interval (int, optional): Interval in seconds between calling `callable(*args, **kwargs)` callable (callable function or method, optional): This should either be a stand-alone function or a method on a typeclassed entity (that is, one that can be saved to the database). idstring (str, optional): Identifier for separating this ticker-subscription from others with the same interval. Allows for managing multiple calls with the same time interval and callback. persistent (bool, optional): A ticker will always survive a server reload. 
If this is unset, the ticker will be deleted by a server shutdown. args, kwargs (optional): These will be passed into the callback every time it is called. Notes: The callback will be identified by type and stored either as a combination of serialized database object + methodname or as a python-path to the module + funcname. These strings will be combined with `interval` and `idstring` to define a unique storage key for saving. These must thus all be supplied when wanting to modify/remove the ticker later. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.add has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) kwargs["_obj"] = obj kwargs["_callback"] = callfunc # either method-name or callable self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) self.save() def remove(self, interval=60, callback=None, idstring="", persistent=True): """ Remove object from ticker or only remove it from tickers with a given interval. Args: interval (int, optional): Interval of ticker to remove. callback (callable function or method): Either a function or the method of a typeclassed object. idstring (str, optional): Identifier id of ticker to remove. persistent (bool, optional): Must match the value the ticker was added with, since it is part of the identifying store_key. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.remove has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) to_remove = self.ticker_storage.pop(store_key, None) if to_remove: self.ticker_pool.remove(store_key) self.save() def clear(self, interval=None): """ Stop/remove tickers from handler. Args: interval (int): Only stop tickers with this interval. Notes: This is the only supported way to kill tickers related to non-db objects. """ self.ticker_pool.stop(interval) if interval: # keep subscriptions with other intervals (interval is index 3) self.ticker_storage = dict((store_key, subs) for store_key, subs in self.ticker_storage.items() if store_key[3] != interval) else: self.ticker_storage = {} self.save() def all(self, interval=None): """ Get all subscriptions. Args: interval (int): Limit match to tickers with this interval. Returns: tickers (dict): If `interval` was given, this is a dict `{interval: subscriptions}` for that interval (or None if no ticker uses it). tickerpool_layout (dict): If `interval` was *not* given, this is a dict {interval1: [ticker1, ticker2, ...], ...} """ if interval is None: # return dict of all, ordered by interval return dict((interval, ticker.subscriptions) for interval, ticker in self.ticker_pool.tickers.iteritems()) else: # get individual interval ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions} def all_display(self): """ Get all tickers on an easily displayable form. Returns: store_keys (list): A list of all storekeys """ store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent)) return store_keys # main tickerhandler TICKER_HANDLER = TickerHandler()
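Since several methods above index into the store_key tuple, its layout (documented in `_store_key`) is worth spelling out. An illustrative key for a path-based callable, with hypothetical values:

```python
# (packed_obj, methodname, path, interval, idstring, persistent)
key = (None, None, "mygame.world.weather.update", 60, "", True)
assert key[3] == 60  # the interval lives at index 3
```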
@inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. """ self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = []
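A hedged end-to-end usage sketch of the handler this implementation powers; `my_tick` is a hypothetical module-level function and a running Evennia server is assumed:

```python
from evennia.scripts.tickerhandler import TICKER_HANDLER

def my_tick(*args, **kwargs):  # hypothetical hook
    print("tick: %s %s" % (args, kwargs))

# call my_tick() every 30 seconds under a custom idstring; the same
# (interval, callback, idstring) triple identifies it for removal
TICKER_HANDLER.add(30, my_tick, idstring="demo")
TICKER_HANDLER.remove(30, my_tick, idstring="demo")
```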
88
139
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ @inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. 
""" self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = [] def __init__(self, interval): """ Set up the ticker Args: interval (int): The stepping interval. """ self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] # set up a twisted asynchronous repeat call self.task = ExtendedLoopingCall(self._callback) def validate(self, start_delay=None): """ Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to way before starting. """ subs = self.subscriptions if self.task.running: if not subs: self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} def add(self, store_key, *args, **kwargs): """ Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method. """ _, _, _, interval, _, _ = store_key if not interval: log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if interval not in self.tickers: self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs) def remove(self, store_key): """ Remove subscription from pool. 
Args: store_key (str): Unique storage hash to remove """ _, _, _, interval, _, _ = store_key if interval in self.tickers: self.tickers[interval].remove(store_key) if not self.tickers[interval]: del self.tickers[interval] def stop(self, interval=None): """ Stop all scripts in pool. This is done at server reload since restoring the pool will automatically re-populate the pool. Args: interval (int, optional): Only stop tickers with this interval. """ if interval and interval in self.tickers: self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop() class TickerHandler(object): """ The Tickerhandler maintains a pool of tasks for subscribing objects to various tick rates. The pool maintains creation instructions and and re-applies them at a server restart. """ ticker_pool_class = TickerPool def __init__(self, save_name="ticker_storage"): """ Initialize handler save_name (str, optional): The name of the ServerConfig instance to store the handler state persistently. """ self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class() def _get_callback(self, callback): """ Analyze callback and determine its consituents Args: callback (function or method): This is either a stand-alone function or class method on a typeclassed entitye (that is, an entity that can be saved to the database). Returns: ret (tuple): This is a tuple of the form `(obj, path, callfunc)`, where `obj` is the database object the callback is defined on if it's a method (otherwise `None`) and vice-versa, `path` is the python-path to the stand-alone function (`None` if a method). The `callfunc` is either the name of the method to call or the callable function object itself. """ outobj, outpath, outcallfunc = None, None, None if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = "%s.%s" % (callback.__module__, callback.func_name) outcallfunc = callback else: raise TypeError("%s is not a callable function or method." % callback) return outobj, outpath, outcallfunc def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True): """ Tries to create a store_key for the object. Args: obj (Object, tuple or None): Subscribing object if any. If a tuple, this is a packed_obj tuple from dbserialize. path (str or None): Python-path to callable, if any. interval (int): Ticker interval. callfunc (callable or str): This is either the callable function or the name of the method to call. Note that the callable is never stored in the key; that is uniquely identified with the python-path. idstring (str, optional): Additional separator between different subscription types. persistent (bool, optional): If this ticker should survive a system shutdown or not. Returns: store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval, idstring, persistent)` that uniquely identifies the ticker. Here, `packed_obj` is the unique string representation of the object or `None`. The `methodname` is the string name of the method on `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is the Python-path to a non-method callable, or `None`. Finally, `interval` `idstring` and `persistent` are integers, strings and bools respectively. 
""" interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. """ if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. 
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if obj and callfunc: kwargs["_callback"] = callfunc kwargs["_obj"] = obj elif path: modname, varname = path.rsplit(".", 1) callback = variable_from_module(modname, varname) kwargs["_callback"] = callback kwargs["_obj"] = None else: # Neither object nor path - discard this ticker log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue except Exception: # this suggests a malformed save or missing objects log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue # if we get here we should create a new ticker self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs): """ Add subscription to tickerhandler Args: interval (int, optional): Interval in seconds between calling `callable(*args, **kwargs)` callable (callable function or method, optional): This should either be a stand-alone function or a method on a typeclassed entity (that is, one that can be saved to the database). idstring (str, optional): Identifier for separating this ticker-subscription from others with the same interval. Allows for managing multiple calls with the same time interval and callback. persistent (bool, optional): A ticker will always survive a server reload. If this is unset, the ticker will be deleted by a server shutdown. args, kwargs (optional): These will be passed into the callback every time it is called. Notes: The callback will be identified by type and stored either as as combination of serialized database object + methodname or as a python-path to the module + funcname. These strings will be combined iwth `interval` and `idstring` to define a unique storage key for saving. These must thus all be supplied when wanting to modify/remove the ticker later. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.add has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) kwargs["_obj"] = obj kwargs["_callback"] = callfunc # either method-name or callable self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) self.save() def remove(self, interval=60, callback=None, idstring="", persistent=True): """ Remove object from ticker or only remove it from tickers with a given interval. Args: interval (int, optional): Interval of ticker to remove. callback (callable function or method): Either a function or the method of a typeclassed object. idstring (str, optional): Identifier id of ticker to remove. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.remove has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) to_remove = self.ticker_storage.pop(store_key, None) if to_remove: self.ticker_pool.remove(store_key) self.save() def clear(self, interval=None): """ Stop/remove tickers from handler. Args: interval (int): Only stop tickers with this interval. Notes: This is the only supported way to kill tickers related to non-db objects. 
""" self.ticker_pool.stop(interval) if interval: self.ticker_storage = dict((store_key, store_key) for store_key in self.ticker_storage if store_key[1] != interval) else: self.ticker_storage = {} self.save() def all(self, interval=None): """ Get all subscriptions. Args: interval (int): Limit match to tickers with this interval. Returns: tickers (list): If `interval` was given, this is a list of tickers using that interval. tickerpool_layout (dict): If `interval` was *not* given, this is a dict {interval1: [ticker1, ticker2, ...], ...} """ if interval is None: # return dict of all, ordered by interval return dict((interval, ticker.subscriptions) for interval, ticker in self.ticker_pool.tickers.iteritems()) else: # get individual interval ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions} def all_display(self): """ Get all tickers on an easily displayable form. Returns: tickers (dict): A list of all storekeys """ store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent)) return store_keys # main tickerhandler TICKER_HANDLER = TickerHandler()
__init__
Set up the ticker

Args:
    interval (int): The stepping interval.
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ @inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. 
""" self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = [] # MASKED: __init__ function (lines 142-156) def validate(self, start_delay=None): """ Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to way before starting. """ subs = self.subscriptions if self.task.running: if not subs: self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} def add(self, store_key, *args, **kwargs): """ Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method. """ _, _, _, interval, _, _ = store_key if not interval: log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if interval not in self.tickers: self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs) def remove(self, store_key): """ Remove subscription from pool. Args: store_key (str): Unique storage hash to remove """ _, _, _, interval, _, _ = store_key if interval in self.tickers: self.tickers[interval].remove(store_key) if not self.tickers[interval]: del self.tickers[interval] def stop(self, interval=None): """ Stop all scripts in pool. This is done at server reload since restoring the pool will automatically re-populate the pool. 
Args: interval (int, optional): Only stop tickers with this interval. """ if interval and interval in self.tickers: self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop() class TickerHandler(object): """ The Tickerhandler maintains a pool of tasks for subscribing objects to various tick rates. The pool maintains creation instructions and and re-applies them at a server restart. """ ticker_pool_class = TickerPool def __init__(self, save_name="ticker_storage"): """ Initialize handler save_name (str, optional): The name of the ServerConfig instance to store the handler state persistently. """ self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class() def _get_callback(self, callback): """ Analyze callback and determine its consituents Args: callback (function or method): This is either a stand-alone function or class method on a typeclassed entitye (that is, an entity that can be saved to the database). Returns: ret (tuple): This is a tuple of the form `(obj, path, callfunc)`, where `obj` is the database object the callback is defined on if it's a method (otherwise `None`) and vice-versa, `path` is the python-path to the stand-alone function (`None` if a method). The `callfunc` is either the name of the method to call or the callable function object itself. """ outobj, outpath, outcallfunc = None, None, None if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = "%s.%s" % (callback.__module__, callback.func_name) outcallfunc = callback else: raise TypeError("%s is not a callable function or method." % callback) return outobj, outpath, outcallfunc def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True): """ Tries to create a store_key for the object. Args: obj (Object, tuple or None): Subscribing object if any. If a tuple, this is a packed_obj tuple from dbserialize. path (str or None): Python-path to callable, if any. interval (int): Ticker interval. callfunc (callable or str): This is either the callable function or the name of the method to call. Note that the callable is never stored in the key; that is uniquely identified with the python-path. idstring (str, optional): Additional separator between different subscription types. persistent (bool, optional): If this ticker should survive a system shutdown or not. Returns: store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval, idstring, persistent)` that uniquely identifies the ticker. Here, `packed_obj` is the unique string representation of the object or `None`. The `methodname` is the string name of the method on `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is the Python-path to a non-method callable, or `None`. Finally, `interval` `idstring` and `persistent` are integers, strings and bools respectively. """ interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. 
""" if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if obj and callfunc: kwargs["_callback"] = callfunc kwargs["_obj"] = obj elif path: modname, varname = path.rsplit(".", 1) callback = variable_from_module(modname, varname) kwargs["_callback"] = callback kwargs["_obj"] = None else: # Neither object nor path - discard this ticker log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue except Exception: # this suggests a malformed save or missing objects log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue # if we get here we should create a new ticker self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs): """ Add subscription to tickerhandler Args: interval (int, optional): Interval in seconds between calling `callable(*args, **kwargs)` callable (callable function or method, optional): This should either be a stand-alone function or a method on a typeclassed entity (that is, one that can be saved to the database). idstring (str, optional): Identifier for separating this ticker-subscription from others with the same interval. Allows for managing multiple calls with the same time interval and callback. persistent (bool, optional): A ticker will always survive a server reload. 
If this is unset, the ticker will be deleted by a server shutdown. args, kwargs (optional): These will be passed into the callback every time it is called. Notes: The callback will be identified by type and stored either as as combination of serialized database object + methodname or as a python-path to the module + funcname. These strings will be combined iwth `interval` and `idstring` to define a unique storage key for saving. These must thus all be supplied when wanting to modify/remove the ticker later. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.add has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) kwargs["_obj"] = obj kwargs["_callback"] = callfunc # either method-name or callable self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) self.save() def remove(self, interval=60, callback=None, idstring="", persistent=True): """ Remove object from ticker or only remove it from tickers with a given interval. Args: interval (int, optional): Interval of ticker to remove. callback (callable function or method): Either a function or the method of a typeclassed object. idstring (str, optional): Identifier id of ticker to remove. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.remove has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) to_remove = self.ticker_storage.pop(store_key, None) if to_remove: self.ticker_pool.remove(store_key) self.save() def clear(self, interval=None): """ Stop/remove tickers from handler. Args: interval (int): Only stop tickers with this interval. Notes: This is the only supported way to kill tickers related to non-db objects. """ self.ticker_pool.stop(interval) if interval: self.ticker_storage = dict((store_key, store_key) for store_key in self.ticker_storage if store_key[1] != interval) else: self.ticker_storage = {} self.save() def all(self, interval=None): """ Get all subscriptions. Args: interval (int): Limit match to tickers with this interval. Returns: tickers (list): If `interval` was given, this is a list of tickers using that interval. tickerpool_layout (dict): If `interval` was *not* given, this is a dict {interval1: [ticker1, ticker2, ...], ...} """ if interval is None: # return dict of all, ordered by interval return dict((interval, ticker.subscriptions) for interval, ticker in self.ticker_pool.tickers.iteritems()) else: # get individual interval ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions} def all_display(self): """ Get all tickers on an easily displayable form. Returns: tickers (dict): A list of all storekeys """ store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent)) return store_keys # main tickerhandler TICKER_HANDLER = TickerHandler()
def __init__(self, interval):
    """
    Set up the ticker

    Args:
        interval (int): The stepping interval.

    """
    self.interval = interval
    self.subscriptions = {}
    self._is_ticking = False
    self._to_remove = []
    self._to_add = []
    # set up a twisted asynchronous repeat call
    self.task = ExtendedLoopingCall(self._callback)
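Note that `__init__` only wires the `ExtendedLoopingCall` to `_callback`; nothing runs until `validate()` starts the task for the first subscriber. A rough standalone analogue of that lazy-start pattern, using Twisted's stock `LoopingCall` (which lacks the `start_delay` extension), might look like this; `MiniTicker` is a hypothetical name and the ticker only fires under a running Twisted reactor.

```python
from twisted.internet.task import LoopingCall

class MiniTicker(object):
    def __init__(self, interval):
        self.interval = interval
        self.subscriptions = {}
        # created up front, but only started once a subscriber arrives
        self.task = LoopingCall(self._callback)

    def _callback(self):
        # call every subscriber each tick
        for func, (args, kwargs) in list(self.subscriptions.values()):
            func(*args, **kwargs)

    def add(self, key, func, *args, **kwargs):
        self.subscriptions[key] = (func, (args, kwargs))
        self.validate()

    def remove(self, key):
        self.subscriptions.pop(key, None)
        self.validate()

    def validate(self):
        # start/stop depending on whether anyone is subscribed
        if self.task.running and not self.subscriptions:
            self.task.stop()
        elif self.subscriptions and not self.task.running:
            self.task.start(self.interval, now=False)
```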
142
156
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ @inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. 
""" self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = [] def __init__(self, interval): """ Set up the ticker Args: interval (int): The stepping interval. """ self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] # set up a twisted asynchronous repeat call self.task = ExtendedLoopingCall(self._callback) def validate(self, start_delay=None): """ Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to way before starting. """ subs = self.subscriptions if self.task.running: if not subs: self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} def add(self, store_key, *args, **kwargs): """ Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method. """ _, _, _, interval, _, _ = store_key if not interval: log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if interval not in self.tickers: self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs) def remove(self, store_key): """ Remove subscription from pool. 
Args: store_key (str): Unique storage hash to remove """ _, _, _, interval, _, _ = store_key if interval in self.tickers: self.tickers[interval].remove(store_key) if not self.tickers[interval]: del self.tickers[interval] def stop(self, interval=None): """ Stop all scripts in pool. This is done at server reload since restoring the pool will automatically re-populate the pool. Args: interval (int, optional): Only stop tickers with this interval. """ if interval and interval in self.tickers: self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop() class TickerHandler(object): """ The Tickerhandler maintains a pool of tasks for subscribing objects to various tick rates. The pool maintains creation instructions and and re-applies them at a server restart. """ ticker_pool_class = TickerPool def __init__(self, save_name="ticker_storage"): """ Initialize handler save_name (str, optional): The name of the ServerConfig instance to store the handler state persistently. """ self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class() def _get_callback(self, callback): """ Analyze callback and determine its consituents Args: callback (function or method): This is either a stand-alone function or class method on a typeclassed entitye (that is, an entity that can be saved to the database). Returns: ret (tuple): This is a tuple of the form `(obj, path, callfunc)`, where `obj` is the database object the callback is defined on if it's a method (otherwise `None`) and vice-versa, `path` is the python-path to the stand-alone function (`None` if a method). The `callfunc` is either the name of the method to call or the callable function object itself. """ outobj, outpath, outcallfunc = None, None, None if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = "%s.%s" % (callback.__module__, callback.func_name) outcallfunc = callback else: raise TypeError("%s is not a callable function or method." % callback) return outobj, outpath, outcallfunc def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True): """ Tries to create a store_key for the object. Args: obj (Object, tuple or None): Subscribing object if any. If a tuple, this is a packed_obj tuple from dbserialize. path (str or None): Python-path to callable, if any. interval (int): Ticker interval. callfunc (callable or str): This is either the callable function or the name of the method to call. Note that the callable is never stored in the key; that is uniquely identified with the python-path. idstring (str, optional): Additional separator between different subscription types. persistent (bool, optional): If this ticker should survive a system shutdown or not. Returns: store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval, idstring, persistent)` that uniquely identifies the ticker. Here, `packed_obj` is the unique string representation of the object or `None`. The `methodname` is the string name of the method on `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is the Python-path to a non-method callable, or `None`. Finally, `interval` `idstring` and `persistent` are integers, strings and bools respectively. 
""" interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. """ if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. 
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if obj and callfunc: kwargs["_callback"] = callfunc kwargs["_obj"] = obj elif path: modname, varname = path.rsplit(".", 1) callback = variable_from_module(modname, varname) kwargs["_callback"] = callback kwargs["_obj"] = None else: # Neither object nor path - discard this ticker log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue except Exception: # this suggests a malformed save or missing objects log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue # if we get here we should create a new ticker self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs): """ Add subscription to tickerhandler Args: interval (int, optional): Interval in seconds between calling `callable(*args, **kwargs)` callable (callable function or method, optional): This should either be a stand-alone function or a method on a typeclassed entity (that is, one that can be saved to the database). idstring (str, optional): Identifier for separating this ticker-subscription from others with the same interval. Allows for managing multiple calls with the same time interval and callback. persistent (bool, optional): A ticker will always survive a server reload. If this is unset, the ticker will be deleted by a server shutdown. args, kwargs (optional): These will be passed into the callback every time it is called. Notes: The callback will be identified by type and stored either as as combination of serialized database object + methodname or as a python-path to the module + funcname. These strings will be combined iwth `interval` and `idstring` to define a unique storage key for saving. These must thus all be supplied when wanting to modify/remove the ticker later. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.add has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) kwargs["_obj"] = obj kwargs["_callback"] = callfunc # either method-name or callable self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) self.save() def remove(self, interval=60, callback=None, idstring="", persistent=True): """ Remove object from ticker or only remove it from tickers with a given interval. Args: interval (int, optional): Interval of ticker to remove. callback (callable function or method): Either a function or the method of a typeclassed object. idstring (str, optional): Identifier id of ticker to remove. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.remove has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) to_remove = self.ticker_storage.pop(store_key, None) if to_remove: self.ticker_pool.remove(store_key) self.save() def clear(self, interval=None): """ Stop/remove tickers from handler. Args: interval (int): Only stop tickers with this interval. Notes: This is the only supported way to kill tickers related to non-db objects. 
""" self.ticker_pool.stop(interval) if interval: self.ticker_storage = dict((store_key, store_key) for store_key in self.ticker_storage if store_key[1] != interval) else: self.ticker_storage = {} self.save() def all(self, interval=None): """ Get all subscriptions. Args: interval (int): Limit match to tickers with this interval. Returns: tickers (list): If `interval` was given, this is a list of tickers using that interval. tickerpool_layout (dict): If `interval` was *not* given, this is a dict {interval1: [ticker1, ticker2, ...], ...} """ if interval is None: # return dict of all, ordered by interval return dict((interval, ticker.subscriptions) for interval, ticker in self.ticker_pool.tickers.iteritems()) else: # get individual interval ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions} def all_display(self): """ Get all tickers on an easily displayable form. Returns: tickers (dict): A list of all storekeys """ store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent)) return store_keys # main tickerhandler TICKER_HANDLER = TickerHandler()
validate
Start/stop the task depending on how many subscribers we have using it.

Args:
    start_delay (int): Time to wait before starting.
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ @inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. 
""" self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = [] def __init__(self, interval): """ Set up the ticker Args: interval (int): The stepping interval. """ self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] # set up a twisted asynchronous repeat call self.task = ExtendedLoopingCall(self._callback) # MASKED: validate function (lines 158-172) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} def add(self, store_key, *args, **kwargs): """ Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method. """ _, _, _, interval, _, _ = store_key if not interval: log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if interval not in self.tickers: self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs) def remove(self, store_key): """ Remove subscription from pool. Args: store_key (str): Unique storage hash to remove """ _, _, _, interval, _, _ = store_key if interval in self.tickers: self.tickers[interval].remove(store_key) if not self.tickers[interval]: del self.tickers[interval] def stop(self, interval=None): """ Stop all scripts in pool. This is done at server reload since restoring the pool will automatically re-populate the pool. Args: interval (int, optional): Only stop tickers with this interval. 
""" if interval and interval in self.tickers: self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop() class TickerHandler(object): """ The Tickerhandler maintains a pool of tasks for subscribing objects to various tick rates. The pool maintains creation instructions and and re-applies them at a server restart. """ ticker_pool_class = TickerPool def __init__(self, save_name="ticker_storage"): """ Initialize handler save_name (str, optional): The name of the ServerConfig instance to store the handler state persistently. """ self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class() def _get_callback(self, callback): """ Analyze callback and determine its consituents Args: callback (function or method): This is either a stand-alone function or class method on a typeclassed entitye (that is, an entity that can be saved to the database). Returns: ret (tuple): This is a tuple of the form `(obj, path, callfunc)`, where `obj` is the database object the callback is defined on if it's a method (otherwise `None`) and vice-versa, `path` is the python-path to the stand-alone function (`None` if a method). The `callfunc` is either the name of the method to call or the callable function object itself. """ outobj, outpath, outcallfunc = None, None, None if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = "%s.%s" % (callback.__module__, callback.func_name) outcallfunc = callback else: raise TypeError("%s is not a callable function or method." % callback) return outobj, outpath, outcallfunc def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True): """ Tries to create a store_key for the object. Args: obj (Object, tuple or None): Subscribing object if any. If a tuple, this is a packed_obj tuple from dbserialize. path (str or None): Python-path to callable, if any. interval (int): Ticker interval. callfunc (callable or str): This is either the callable function or the name of the method to call. Note that the callable is never stored in the key; that is uniquely identified with the python-path. idstring (str, optional): Additional separator between different subscription types. persistent (bool, optional): If this ticker should survive a system shutdown or not. Returns: store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval, idstring, persistent)` that uniquely identifies the ticker. Here, `packed_obj` is the unique string representation of the object or `None`. The `methodname` is the string name of the method on `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is the Python-path to a non-method callable, or `None`. Finally, `interval` `idstring` and `persistent` are integers, strings and bools respectively. """ interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. 
""" if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if obj and callfunc: kwargs["_callback"] = callfunc kwargs["_obj"] = obj elif path: modname, varname = path.rsplit(".", 1) callback = variable_from_module(modname, varname) kwargs["_callback"] = callback kwargs["_obj"] = None else: # Neither object nor path - discard this ticker log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue except Exception: # this suggests a malformed save or missing objects log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue # if we get here we should create a new ticker self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs): """ Add subscription to tickerhandler Args: interval (int, optional): Interval in seconds between calling `callable(*args, **kwargs)` callable (callable function or method, optional): This should either be a stand-alone function or a method on a typeclassed entity (that is, one that can be saved to the database). idstring (str, optional): Identifier for separating this ticker-subscription from others with the same interval. Allows for managing multiple calls with the same time interval and callback. persistent (bool, optional): A ticker will always survive a server reload. 
                If this is unset, the ticker will be deleted by a
                server shutdown.
            args, kwargs (optional): These will be passed into the
                callback every time it is called.

        Notes:
            The callback will be identified by type and stored either as
            a combination of serialized database object + methodname or
            as a python-path to the module + funcname. These strings
            will be combined with `interval` and `idstring` to define a
            unique storage key for saving. These must thus all be
            supplied when wanting to modify/remove the ticker later.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.add has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        kwargs["_obj"] = obj
        kwargs["_callback"] = callfunc  # either method-name or callable
        self.ticker_storage[store_key] = (args, kwargs)
        self.ticker_pool.add(store_key, *args, **kwargs)
        self.save()

    def remove(self, interval=60, callback=None, idstring="", persistent=True):
        """
        Remove object from ticker or only remove it from tickers with
        a given interval.

        Args:
            interval (int, optional): Interval of ticker to remove.
            callback (callable function or method): Either a function or
                the method of a typeclassed object.
            idstring (str, optional): Identifier id of ticker to remove.
            persistent (bool, optional): Whether the ticker was added as
                persistent; part of its identification.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.remove has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        to_remove = self.ticker_storage.pop(store_key, None)
        if to_remove:
            self.ticker_pool.remove(store_key)
            self.save()

    def clear(self, interval=None):
        """
        Stop/remove tickers from handler.

        Args:
            interval (int): Only stop tickers with this interval.

        Notes:
            This is the only supported way to kill tickers related to
            non-db objects.

        """
        self.ticker_pool.stop(interval)
        if interval:
            # keep subscriptions whose interval (the fourth store_key
            # element) differs from the one being cleared
            self.ticker_storage = dict((store_key, self.ticker_storage[store_key])
                                       for store_key in self.ticker_storage
                                       if store_key[3] != interval)
        else:
            self.ticker_storage = {}
        self.save()

    def all(self, interval=None):
        """
        Get all subscriptions.

        Args:
            interval (int): Limit match to tickers with this interval.

        Returns:
            tickers (dict): If `interval` was given, this is a dict
                `{interval: subscriptions}` for that interval.
            tickerpool_layout (dict): If `interval` was *not* given,
                this is a dict {interval1: [ticker1, ticker2, ...], ...}

        """
        if interval is None:
            # return dict of all, ordered by interval
            return dict((interval, ticker.subscriptions)
                        for interval, ticker in self.ticker_pool.tickers.iteritems())
        else:
            # get individual interval
            ticker = self.ticker_pool.tickers.get(interval, None)
            if ticker:
                return {interval: ticker.subscriptions}

    def all_display(self):
        """
        Get all tickers in an easily displayable form.

        Returns:
            tickers (list): A list of all store_keys.

        """
        store_keys = []
        for ticker in self.ticker_pool.tickers.itervalues():
            for (objtup, callfunc, path, interval,
                 idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems():
                store_keys.append((kwargs.get("_obj", None), callfunc, path,
                                   interval, idstring, persistent))
        return store_keys


# main tickerhandler
TICKER_HANDLER = TickerHandler()
def validate(self, start_delay=None):
    """
    Start/stop the task depending on how many subscribers we have
    using it.

    Args:
        start_delay (int): Time to wait before starting.

    """
    subs = self.subscriptions
    if self.task.running:
        if not subs:
            self.task.stop()
    elif subs:
        self.task.start(self.interval, now=False, start_delay=start_delay)
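The pattern in `validate` is worth noting: the underlying looping task only runs while subscribers exist, so an idle ticker costs nothing. Below is a minimal sketch of the same start/stop-on-demand idea using Twisted's stock `LoopingCall`; Evennia's `ExtendedLoopingCall` adds the `start_delay` argument, which the stock class lacks, so it is omitted here.

```python
from twisted.internet.task import LoopingCall


class MiniTicker(object):
    """A stripped-down ticker: the task runs only while it has subscribers."""

    def __init__(self, interval):
        self.interval = interval
        self.subscriptions = {}  # store_key -> callable
        self.task = LoopingCall(self._tick)

    def _tick(self):
        # call every subscribed callable; a real implementation would
        # guard against exceptions the way Ticker._callback does
        for callback in self.subscriptions.values():
            callback()

    def validate(self):
        # start the task only while there are subscribers and stop it
        # as soon as the last subscriber is gone
        if self.task.running:
            if not self.subscriptions:
                self.task.stop()
        elif self.subscriptions:
            self.task.start(self.interval, now=False)
```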
158
172
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ @inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. 
""" self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = [] def __init__(self, interval): """ Set up the ticker Args: interval (int): The stepping interval. """ self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] # set up a twisted asynchronous repeat call self.task = ExtendedLoopingCall(self._callback) def validate(self, start_delay=None): """ Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to way before starting. """ subs = self.subscriptions if self.task.running: if not subs: self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} def add(self, store_key, *args, **kwargs): """ Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method. """ _, _, _, interval, _, _ = store_key if not interval: log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if interval not in self.tickers: self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs) def remove(self, store_key): """ Remove subscription from pool. 
        Args:
            store_key (str): Unique storage hash to remove

        """
        _, _, _, interval, _, _ = store_key
        if interval in self.tickers:
            self.tickers[interval].remove(store_key)
            if not self.tickers[interval]:
                del self.tickers[interval]

    def stop(self, interval=None):
        """
        Stop all scripts in pool. This is done at server reload since
        restoring the pool will automatically re-populate the pool.

        Args:
            interval (int, optional): Only stop tickers with this interval.

        """
        if interval and interval in self.tickers:
            self.tickers[interval].stop()
        else:
            for ticker in self.tickers.values():
                ticker.stop()


class TickerHandler(object):
    """
    The Tickerhandler maintains a pool of tasks for subscribing objects
    to various tick rates. The pool maintains creation instructions
    and re-applies them at a server restart.
    """
    ticker_pool_class = TickerPool

    def __init__(self, save_name="ticker_storage"):
        """
        Initialize handler

        Args:
            save_name (str, optional): The name of the ServerConfig
                instance to store the handler state persistently.

        """
        self.ticker_storage = {}
        self.save_name = save_name
        self.ticker_pool = self.ticker_pool_class()

    def _get_callback(self, callback):
        """
        Analyze callback and determine its constituents

        Args:
            callback (function or method): This is either a stand-alone
                function or a class method on a typeclassed entity (that
                is, an entity that can be saved to the database).

        Returns:
            ret (tuple): This is a tuple of the form `(obj, path, callfunc)`,
                where `obj` is the database object the callback is defined on
                if it's a method (otherwise `None`) and vice-versa, `path` is
                the python-path to the stand-alone function (`None` if a
                method). The `callfunc` is either the name of the method to
                call or the callable function object itself.

        """
        outobj, outpath, outcallfunc = None, None, None
        if callable(callback):
            if inspect.ismethod(callback):
                outobj = callback.im_self
                outcallfunc = callback.im_func.func_name
            elif inspect.isfunction(callback):
                outpath = "%s.%s" % (callback.__module__, callback.func_name)
                outcallfunc = callback
        else:
            raise TypeError("%s is not a callable function or method." % callback)
        return outobj, outpath, outcallfunc

    def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True):
        """
        Tries to create a store_key for the object.

        Args:
            obj (Object, tuple or None): Subscribing object if any. If a
                tuple, this is a packed_obj tuple from dbserialize.
            path (str or None): Python-path to callable, if any.
            interval (int): Ticker interval.
            callfunc (callable or str): This is either the callable function
                or the name of the method to call. Note that the callable is
                never stored in the key; that is uniquely identified with the
                python-path.
            idstring (str, optional): Additional separator between different
                subscription types.
            persistent (bool, optional): If this ticker should survive a
                system shutdown or not.

        Returns:
            store_key (tuple): A tuple `(packed_obj, methodname, outpath,
                interval, idstring, persistent)` that uniquely identifies the
                ticker. Here, `packed_obj` is the unique string representation
                of the object or `None`. The `methodname` is the string name
                of the method on `packed_obj` to call, or `None` if
                `packed_obj` is unset. `path` is the Python-path to a
                non-method callable, or `None`. Finally, `interval`,
                `idstring` and `persistent` are an integer, a string and a
                bool, respectively.
""" interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. """ if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. 
                    store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)

                    if obj and callfunc:
                        kwargs["_callback"] = callfunc
                        kwargs["_obj"] = obj
                    elif path:
                        modname, varname = path.rsplit(".", 1)
                        callback = variable_from_module(modname, varname)
                        kwargs["_callback"] = callback
                        kwargs["_obj"] = None
                    else:
                        # Neither object nor path - discard this ticker
                        log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
                        continue
                except Exception:
                    # this suggests a malformed save or missing objects
                    log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
                    continue
                # if we get here we should create a new ticker
                self.ticker_storage[store_key] = (args, kwargs)
                self.ticker_pool.add(store_key, *args, **kwargs)

    def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs):
        """
        Add subscription to tickerhandler

        Args:
            interval (int, optional): Interval in seconds between calling
                `callable(*args, **kwargs)`
            callable (callable function or method, optional): This
                should either be a stand-alone function or a method on a
                typeclassed entity (that is, one that can be saved to the
                database).
            idstring (str, optional): Identifier for separating this
                ticker-subscription from others with the same interval.
                Allows for managing multiple calls with the same time
                interval and callback.
            persistent (bool, optional): A ticker will always survive a
                server reload. If this is unset, the ticker will be
                deleted by a server shutdown.
            args, kwargs (optional): These will be passed into the
                callback every time it is called.

        Notes:
            The callback will be identified by type and stored either as
            a combination of serialized database object + methodname or
            as a python-path to the module + funcname. These strings
            will be combined with `interval` and `idstring` to define a
            unique storage key for saving. These must thus all be
            supplied when wanting to modify/remove the ticker later.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.add has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        kwargs["_obj"] = obj
        kwargs["_callback"] = callfunc  # either method-name or callable
        self.ticker_storage[store_key] = (args, kwargs)
        self.ticker_pool.add(store_key, *args, **kwargs)
        self.save()

    def remove(self, interval=60, callback=None, idstring="", persistent=True):
        """
        Remove object from ticker or only remove it from tickers with
        a given interval.

        Args:
            interval (int, optional): Interval of ticker to remove.
            callback (callable function or method): Either a function or
                the method of a typeclassed object.
            idstring (str, optional): Identifier id of ticker to remove.
            persistent (bool, optional): Whether the ticker was added as
                persistent; part of its identification.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.remove has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        to_remove = self.ticker_storage.pop(store_key, None)
        if to_remove:
            self.ticker_pool.remove(store_key)
            self.save()

    def clear(self, interval=None):
        """
        Stop/remove tickers from handler.

        Args:
            interval (int): Only stop tickers with this interval.

        Notes:
            This is the only supported way to kill tickers related to
            non-db objects.
""" self.ticker_pool.stop(interval) if interval: self.ticker_storage = dict((store_key, store_key) for store_key in self.ticker_storage if store_key[1] != interval) else: self.ticker_storage = {} self.save() def all(self, interval=None): """ Get all subscriptions. Args: interval (int): Limit match to tickers with this interval. Returns: tickers (list): If `interval` was given, this is a list of tickers using that interval. tickerpool_layout (dict): If `interval` was *not* given, this is a dict {interval1: [ticker1, ticker2, ...], ...} """ if interval is None: # return dict of all, ordered by interval return dict((interval, ticker.subscriptions) for interval, ticker in self.ticker_pool.tickers.iteritems()) else: # get individual interval ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions} def all_display(self): """ Get all tickers on an easily displayable form. Returns: tickers (dict): A list of all storekeys """ store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent)) return store_keys # main tickerhandler TICKER_HANDLER = TickerHandler()
add
Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method.
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ @inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. 
""" self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = [] def __init__(self, interval): """ Set up the ticker Args: interval (int): The stepping interval. """ self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] # set up a twisted asynchronous repeat call self.task = ExtendedLoopingCall(self._callback) def validate(self, start_delay=None): """ Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to way before starting. """ subs = self.subscriptions if self.task.running: if not subs: self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} # MASKED: add function (lines 237-253) def remove(self, store_key): """ Remove subscription from pool. Args: store_key (str): Unique storage hash to remove """ _, _, _, interval, _, _ = store_key if interval in self.tickers: self.tickers[interval].remove(store_key) if not self.tickers[interval]: del self.tickers[interval] def stop(self, interval=None): """ Stop all scripts in pool. This is done at server reload since restoring the pool will automatically re-populate the pool. Args: interval (int, optional): Only stop tickers with this interval. 
""" if interval and interval in self.tickers: self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop() class TickerHandler(object): """ The Tickerhandler maintains a pool of tasks for subscribing objects to various tick rates. The pool maintains creation instructions and and re-applies them at a server restart. """ ticker_pool_class = TickerPool def __init__(self, save_name="ticker_storage"): """ Initialize handler save_name (str, optional): The name of the ServerConfig instance to store the handler state persistently. """ self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class() def _get_callback(self, callback): """ Analyze callback and determine its consituents Args: callback (function or method): This is either a stand-alone function or class method on a typeclassed entitye (that is, an entity that can be saved to the database). Returns: ret (tuple): This is a tuple of the form `(obj, path, callfunc)`, where `obj` is the database object the callback is defined on if it's a method (otherwise `None`) and vice-versa, `path` is the python-path to the stand-alone function (`None` if a method). The `callfunc` is either the name of the method to call or the callable function object itself. """ outobj, outpath, outcallfunc = None, None, None if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = "%s.%s" % (callback.__module__, callback.func_name) outcallfunc = callback else: raise TypeError("%s is not a callable function or method." % callback) return outobj, outpath, outcallfunc def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True): """ Tries to create a store_key for the object. Args: obj (Object, tuple or None): Subscribing object if any. If a tuple, this is a packed_obj tuple from dbserialize. path (str or None): Python-path to callable, if any. interval (int): Ticker interval. callfunc (callable or str): This is either the callable function or the name of the method to call. Note that the callable is never stored in the key; that is uniquely identified with the python-path. idstring (str, optional): Additional separator between different subscription types. persistent (bool, optional): If this ticker should survive a system shutdown or not. Returns: store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval, idstring, persistent)` that uniquely identifies the ticker. Here, `packed_obj` is the unique string representation of the object or `None`. The `methodname` is the string name of the method on `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is the Python-path to a non-method callable, or `None`. Finally, `interval` `idstring` and `persistent` are integers, strings and bools respectively. """ interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. 
""" if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if obj and callfunc: kwargs["_callback"] = callfunc kwargs["_obj"] = obj elif path: modname, varname = path.rsplit(".", 1) callback = variable_from_module(modname, varname) kwargs["_callback"] = callback kwargs["_obj"] = None else: # Neither object nor path - discard this ticker log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue except Exception: # this suggests a malformed save or missing objects log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue # if we get here we should create a new ticker self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs): """ Add subscription to tickerhandler Args: interval (int, optional): Interval in seconds between calling `callable(*args, **kwargs)` callable (callable function or method, optional): This should either be a stand-alone function or a method on a typeclassed entity (that is, one that can be saved to the database). idstring (str, optional): Identifier for separating this ticker-subscription from others with the same interval. Allows for managing multiple calls with the same time interval and callback. persistent (bool, optional): A ticker will always survive a server reload. 
                If this is unset, the ticker will be deleted by a
                server shutdown.
            args, kwargs (optional): These will be passed into the
                callback every time it is called.

        Notes:
            The callback will be identified by type and stored either as
            a combination of serialized database object + methodname or
            as a python-path to the module + funcname. These strings
            will be combined with `interval` and `idstring` to define a
            unique storage key for saving. These must thus all be
            supplied when wanting to modify/remove the ticker later.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.add has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        kwargs["_obj"] = obj
        kwargs["_callback"] = callfunc  # either method-name or callable
        self.ticker_storage[store_key] = (args, kwargs)
        self.ticker_pool.add(store_key, *args, **kwargs)
        self.save()

    def remove(self, interval=60, callback=None, idstring="", persistent=True):
        """
        Remove object from ticker or only remove it from tickers with
        a given interval.

        Args:
            interval (int, optional): Interval of ticker to remove.
            callback (callable function or method): Either a function or
                the method of a typeclassed object.
            idstring (str, optional): Identifier id of ticker to remove.
            persistent (bool, optional): Whether the ticker was added as
                persistent; part of its identification.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.remove has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        to_remove = self.ticker_storage.pop(store_key, None)
        if to_remove:
            self.ticker_pool.remove(store_key)
            self.save()

    def clear(self, interval=None):
        """
        Stop/remove tickers from handler.

        Args:
            interval (int): Only stop tickers with this interval.

        Notes:
            This is the only supported way to kill tickers related to
            non-db objects.

        """
        self.ticker_pool.stop(interval)
        if interval:
            # keep subscriptions whose interval (the fourth store_key
            # element) differs from the one being cleared
            self.ticker_storage = dict((store_key, self.ticker_storage[store_key])
                                       for store_key in self.ticker_storage
                                       if store_key[3] != interval)
        else:
            self.ticker_storage = {}
        self.save()

    def all(self, interval=None):
        """
        Get all subscriptions.

        Args:
            interval (int): Limit match to tickers with this interval.

        Returns:
            tickers (dict): If `interval` was given, this is a dict
                `{interval: subscriptions}` for that interval.
            tickerpool_layout (dict): If `interval` was *not* given,
                this is a dict {interval1: [ticker1, ticker2, ...], ...}

        """
        if interval is None:
            # return dict of all, ordered by interval
            return dict((interval, ticker.subscriptions)
                        for interval, ticker in self.ticker_pool.tickers.iteritems())
        else:
            # get individual interval
            ticker = self.ticker_pool.tickers.get(interval, None)
            if ticker:
                return {interval: ticker.subscriptions}

    def all_display(self):
        """
        Get all tickers in an easily displayable form.

        Returns:
            tickers (list): A list of all store_keys.

        """
        store_keys = []
        for ticker in self.ticker_pool.tickers.itervalues():
            for (objtup, callfunc, path, interval,
                 idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems():
                store_keys.append((kwargs.get("_obj", None), callfunc, path,
                                   interval, idstring, persistent))
        return store_keys


# main tickerhandler
TICKER_HANDLER = TickerHandler()
def add(self, store_key, *args, **kwargs):
    """
    Add new ticker subscriber.

    Args:
        store_key (str): Unique storage hash.
        args (any, optional): Arguments to send to the hook method.

    """
    _, _, _, interval, _, _ = store_key
    if not interval:
        log_err(_ERROR_ADD_TICKER.format(store_key=store_key))
        return
    if interval not in self.tickers:
        self.tickers[interval] = self.ticker_class(interval)
    self.tickers[interval].add(store_key, *args, **kwargs)
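The pool's keying strategy is simple: one `Ticker` per unique interval, created lazily, so all subscriptions sharing an interval share a single looping task. A stand-alone sketch of the same logic, reusing the hypothetical `MiniTicker` from the earlier sketch:

```python
class MiniTickerPool(object):
    """One shared MiniTicker per unique interval, created on demand."""

    ticker_class = MiniTicker  # hypothetical class from the sketch above

    def __init__(self):
        self.tickers = {}  # interval -> MiniTicker

    def add(self, interval, store_key, callback):
        # lazily create the shared ticker for this interval
        if interval not in self.tickers:
            self.tickers[interval] = self.ticker_class(interval)
        ticker = self.tickers[interval]
        ticker.subscriptions[store_key] = callback
        ticker.validate()

    def remove(self, interval, store_key):
        ticker = self.tickers.get(interval)
        if ticker:
            ticker.subscriptions.pop(store_key, None)
            ticker.validate()
            # drop the ticker once its last subscriber is gone
            if not ticker.subscriptions:
                del self.tickers[interval]
```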
237
253
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ @inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. 
""" self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = [] def __init__(self, interval): """ Set up the ticker Args: interval (int): The stepping interval. """ self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] # set up a twisted asynchronous repeat call self.task = ExtendedLoopingCall(self._callback) def validate(self, start_delay=None): """ Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to way before starting. """ subs = self.subscriptions if self.task.running: if not subs: self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} def add(self, store_key, *args, **kwargs): """ Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method. """ _, _, _, interval, _, _ = store_key if not interval: log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if interval not in self.tickers: self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs) def remove(self, store_key): """ Remove subscription from pool. 
        Args:
            store_key (str): Unique storage hash to remove

        """
        _, _, _, interval, _, _ = store_key
        if interval in self.tickers:
            self.tickers[interval].remove(store_key)
            if not self.tickers[interval]:
                del self.tickers[interval]

    def stop(self, interval=None):
        """
        Stop all scripts in pool. This is done at server reload since
        restoring the pool will automatically re-populate the pool.

        Args:
            interval (int, optional): Only stop tickers with this interval.

        """
        if interval and interval in self.tickers:
            self.tickers[interval].stop()
        else:
            for ticker in self.tickers.values():
                ticker.stop()


class TickerHandler(object):
    """
    The Tickerhandler maintains a pool of tasks for subscribing objects
    to various tick rates. The pool maintains creation instructions
    and re-applies them at a server restart.
    """
    ticker_pool_class = TickerPool

    def __init__(self, save_name="ticker_storage"):
        """
        Initialize handler

        Args:
            save_name (str, optional): The name of the ServerConfig
                instance to store the handler state persistently.

        """
        self.ticker_storage = {}
        self.save_name = save_name
        self.ticker_pool = self.ticker_pool_class()

    def _get_callback(self, callback):
        """
        Analyze callback and determine its constituents

        Args:
            callback (function or method): This is either a stand-alone
                function or a class method on a typeclassed entity (that
                is, an entity that can be saved to the database).

        Returns:
            ret (tuple): This is a tuple of the form `(obj, path, callfunc)`,
                where `obj` is the database object the callback is defined on
                if it's a method (otherwise `None`) and vice-versa, `path` is
                the python-path to the stand-alone function (`None` if a
                method). The `callfunc` is either the name of the method to
                call or the callable function object itself.

        """
        outobj, outpath, outcallfunc = None, None, None
        if callable(callback):
            if inspect.ismethod(callback):
                outobj = callback.im_self
                outcallfunc = callback.im_func.func_name
            elif inspect.isfunction(callback):
                outpath = "%s.%s" % (callback.__module__, callback.func_name)
                outcallfunc = callback
        else:
            raise TypeError("%s is not a callable function or method." % callback)
        return outobj, outpath, outcallfunc

    def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True):
        """
        Tries to create a store_key for the object.

        Args:
            obj (Object, tuple or None): Subscribing object if any. If a
                tuple, this is a packed_obj tuple from dbserialize.
            path (str or None): Python-path to callable, if any.
            interval (int): Ticker interval.
            callfunc (callable or str): This is either the callable function
                or the name of the method to call. Note that the callable is
                never stored in the key; that is uniquely identified with the
                python-path.
            idstring (str, optional): Additional separator between different
                subscription types.
            persistent (bool, optional): If this ticker should survive a
                system shutdown or not.

        Returns:
            store_key (tuple): A tuple `(packed_obj, methodname, outpath,
                interval, idstring, persistent)` that uniquely identifies the
                ticker. Here, `packed_obj` is the unique string representation
                of the object or `None`. The `methodname` is the string name
                of the method on `packed_obj` to call, or `None` if
                `packed_obj` is unset. `path` is the Python-path to a
                non-method callable, or `None`. Finally, `interval`,
                `idstring` and `persistent` are an integer, a string and a
                bool, respectively.
""" interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. """ if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. 
                    store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)

                    if obj and callfunc:
                        kwargs["_callback"] = callfunc
                        kwargs["_obj"] = obj
                    elif path:
                        modname, varname = path.rsplit(".", 1)
                        callback = variable_from_module(modname, varname)
                        kwargs["_callback"] = callback
                        kwargs["_obj"] = None
                    else:
                        # Neither object nor path - discard this ticker
                        log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
                        continue
                except Exception:
                    # this suggests a malformed save or missing objects
                    log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
                    continue
                # if we get here we should create a new ticker
                self.ticker_storage[store_key] = (args, kwargs)
                self.ticker_pool.add(store_key, *args, **kwargs)

    def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs):
        """
        Add subscription to tickerhandler

        Args:
            interval (int, optional): Interval in seconds between calling
                `callable(*args, **kwargs)`
            callable (callable function or method, optional): This
                should either be a stand-alone function or a method on a
                typeclassed entity (that is, one that can be saved to the
                database).
            idstring (str, optional): Identifier for separating this
                ticker-subscription from others with the same interval.
                Allows for managing multiple calls with the same time
                interval and callback.
            persistent (bool, optional): A ticker will always survive a
                server reload. If this is unset, the ticker will be
                deleted by a server shutdown.
            args, kwargs (optional): These will be passed into the
                callback every time it is called.

        Notes:
            The callback will be identified by type and stored either as
            a combination of serialized database object + methodname or
            as a python-path to the module + funcname. These strings
            will be combined with `interval` and `idstring` to define a
            unique storage key for saving. These must thus all be
            supplied when wanting to modify/remove the ticker later.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.add has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        kwargs["_obj"] = obj
        kwargs["_callback"] = callfunc  # either method-name or callable
        self.ticker_storage[store_key] = (args, kwargs)
        self.ticker_pool.add(store_key, *args, **kwargs)
        self.save()

    def remove(self, interval=60, callback=None, idstring="", persistent=True):
        """
        Remove object from ticker or only remove it from tickers with
        a given interval.

        Args:
            interval (int, optional): Interval of ticker to remove.
            callback (callable function or method): Either a function or
                the method of a typeclassed object.
            idstring (str, optional): Identifier id of ticker to remove.
            persistent (bool, optional): Whether the ticker was added as
                persistent; part of its identification.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.remove has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        to_remove = self.ticker_storage.pop(store_key, None)
        if to_remove:
            self.ticker_pool.remove(store_key)
            self.save()

    def clear(self, interval=None):
        """
        Stop/remove tickers from handler.

        Args:
            interval (int): Only stop tickers with this interval.

        Notes:
            This is the only supported way to kill tickers related to
            non-db objects.
""" self.ticker_pool.stop(interval) if interval: self.ticker_storage = dict((store_key, store_key) for store_key in self.ticker_storage if store_key[1] != interval) else: self.ticker_storage = {} self.save() def all(self, interval=None): """ Get all subscriptions. Args: interval (int): Limit match to tickers with this interval. Returns: tickers (list): If `interval` was given, this is a list of tickers using that interval. tickerpool_layout (dict): If `interval` was *not* given, this is a dict {interval1: [ticker1, ticker2, ...], ...} """ if interval is None: # return dict of all, ordered by interval return dict((interval, ticker.subscriptions) for interval, ticker in self.ticker_pool.tickers.iteritems()) else: # get individual interval ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions} def all_display(self): """ Get all tickers on an easily displayable form. Returns: tickers (dict): A list of all storekeys """ store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent)) return store_keys # main tickerhandler TICKER_HANDLER = TickerHandler()
all_display
Get all tickers in an easily displayable form. Returns: store_keys (list): A list of all store keys.
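A hedged sketch of how the returned store keys might be consumed for display; each entry mirrors the store key, with the packed object replaced by the live object taken from the stored kwargs:

```python
# each tuple is (obj, callfunc, path, interval, idstring, persistent)
for obj, callfunc, path, interval, idstring, persistent in TICKER_HANDLER.all_display():
    target = "%s.%s" % (obj, callfunc) if obj else path
    print("every %ss: %s (idstring=%r, persistent=%s)" % (interval, target, idstring, persistent))
```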
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ @inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. 
""" self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = [] def __init__(self, interval): """ Set up the ticker Args: interval (int): The stepping interval. """ self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] # set up a twisted asynchronous repeat call self.task = ExtendedLoopingCall(self._callback) def validate(self, start_delay=None): """ Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to way before starting. """ subs = self.subscriptions if self.task.running: if not subs: self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} def add(self, store_key, *args, **kwargs): """ Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method. """ _, _, _, interval, _, _ = store_key if not interval: log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if interval not in self.tickers: self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs) def remove(self, store_key): """ Remove subscription from pool. 
Args: store_key (tuple): Unique storage key to remove. """ _, _, _, interval, _, _ = store_key if interval in self.tickers: self.tickers[interval].remove(store_key) if not self.tickers[interval]: del self.tickers[interval] def stop(self, interval=None): """ Stop all tickers in the pool. This is done at server reload since restoring the pool will automatically re-populate it. Args: interval (int, optional): Only stop tickers with this interval. """ if interval and interval in self.tickers: self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop() class TickerHandler(object): """ The Tickerhandler maintains a pool of tasks for subscribing objects to various tick rates. The pool maintains creation instructions and re-applies them at a server restart. """ ticker_pool_class = TickerPool def __init__(self, save_name="ticker_storage"): """ Initialize handler. Args: save_name (str, optional): The name of the ServerConfig instance used to store the handler state persistently. """ self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class() def _get_callback(self, callback): """ Analyze callback and determine its constituents. Args: callback (function or method): This is either a stand-alone function or a class method on a typeclassed entity (that is, an entity that can be saved to the database). Returns: ret (tuple): This is a tuple of the form `(obj, path, callfunc)`, where `obj` is the database object the callback is defined on if it is a method (otherwise `None`), `path` is the python-path to the stand-alone function (`None` if a method). The `callfunc` is either the name of the method to call or the callable function object itself. """ outobj, outpath, outcallfunc = None, None, None if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = "%s.%s" % (callback.__module__, callback.func_name) outcallfunc = callback else: raise TypeError("%s is not a callable function or method." % callback) return outobj, outpath, outcallfunc def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True): """ Tries to create a store_key for the object. Args: obj (Object, tuple or None): Subscribing object if any. If a tuple, this is a packed_obj tuple from dbserialize. path (str or None): Python-path to callable, if any. interval (int): Ticker interval. callfunc (callable or str): This is either the callable function or the name of the method to call. Note that the callable is never stored in the key; that is uniquely identified with the python-path. idstring (str, optional): Additional separator between different subscription types. persistent (bool, optional): If this ticker should survive a system shutdown or not. Returns: store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval, idstring, persistent)` that uniquely identifies the ticker. Here, `packed_obj` is the unique string representation of the object or `None`. The `methodname` is the string name of the method on `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is the Python-path to a non-method callable, or `None`. Finally, `interval`, `idstring` and `persistent` are an integer, a string and a bool, respectively.
""" interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. """ if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. 
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if obj and callfunc: kwargs["_callback"] = callfunc kwargs["_obj"] = obj elif path: modname, varname = path.rsplit(".", 1) callback = variable_from_module(modname, varname) kwargs["_callback"] = callback kwargs["_obj"] = None else: # Neither object nor path - discard this ticker log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue except Exception: # this suggests a malformed save or missing objects log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue # if we get here we should create a new ticker self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs): """ Add subscription to tickerhandler Args: interval (int, optional): Interval in seconds between calling `callback(*args, **kwargs)` callback (callable function or method, optional): This should either be a stand-alone function or a method on a typeclassed entity (that is, one that can be saved to the database). idstring (str, optional): Identifier for separating this ticker-subscription from others with the same interval. Allows for managing multiple calls with the same time interval and callback. persistent (bool, optional): A ticker always survives a server reload; if this is unset, the ticker will however be deleted by a server shutdown. args, kwargs (optional): These will be passed into the callback every time it is called. Notes: The callback will be identified by type and stored either as a combination of serialized database object + methodname or as a python-path to the module + funcname. These strings will be combined with `interval` and `idstring` to define a unique storage key for saving. These must thus all be supplied when wanting to modify/remove the ticker later. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.add has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) kwargs["_obj"] = obj kwargs["_callback"] = callfunc # either method-name or callable self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) self.save() def remove(self, interval=60, callback=None, idstring="", persistent=True): """ Remove a ticker subscription. The subscription is identified by the same interval, callback, idstring and persistent flag it was added with. Args: interval (int, optional): Interval of ticker to remove. callback (callable function or method): Either a function or the method of a typeclassed object. idstring (str, optional): Identifier of the ticker to remove. persistent (bool, optional): Whether the ticker was added as persistent; this is part of its identification. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.remove has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) to_remove = self.ticker_storage.pop(store_key, None) if to_remove: self.ticker_pool.remove(store_key) self.save() def clear(self, interval=None): """ Stop/remove tickers from handler. Args: interval (int): Only stop tickers with this interval. Notes: This is the only supported way to kill tickers related to non-db objects.
""" self.ticker_pool.stop(interval) if interval: self.ticker_storage = dict((store_key, store_key) for store_key in self.ticker_storage if store_key[1] != interval) else: self.ticker_storage = {} self.save() def all(self, interval=None): """ Get all subscriptions. Args: interval (int): Limit match to tickers with this interval. Returns: tickers (list): If `interval` was given, this is a list of tickers using that interval. tickerpool_layout (dict): If `interval` was *not* given, this is a dict {interval1: [ticker1, ticker2, ...], ...} """ if interval is None: # return dict of all, ordered by interval return dict((interval, ticker.subscriptions) for interval, ticker in self.ticker_pool.tickers.iteritems()) else: # get individual interval ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions} # MASKED: all_display function (lines 564-576) # main tickerhandler TICKER_HANDLER = TickerHandler()
def all_display(self): """ Get all tickers in an easily displayable form. Returns: store_keys (list): A list of all store keys. """ store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent)) return store_keys
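For orientation, these are the two shapes a store key can take, per the `_store_key` docstring earlier in this row; the values below are illustrative placeholders, not real database content:

```python
# method on a typeclassed object: packed obj + method name, no path
("<packed dbobj>", "at_tick", None, 15, "regen", True)

# stand-alone function: no obj/method, identified by its python-path
# (the path is hypothetical)
(None, None, "world.weather.tick_weather", 60, "", True)
```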
564
576
""" TickerHandler This implements an efficient Ticker which uses a subscription model to 'tick' subscribed objects at regular intervals. The ticker mechanism is used by importing and accessing the instantiated TICKER_HANDLER instance in this module. This instance is run by the server; it will save its status across server reloads and be started automaticall on boot. Example: ```python from evennia.scripts.tickerhandler import TICKER_HANDLER # call tick myobj.at_tick(*args, **kwargs) every 15 seconds TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs) ``` You supply the interval to tick and a callable to call regularly with any extra args/kwargs. The handler will transparently set up and add new timers behind the scenes to tick at given intervals, using a TickerPool - all callables with the same interval will share the interval ticker. To remove: ```python TICKER_HANDLER.remove(15, myobj.at_tick) ``` Both interval and callable must be given since a single object can be subscribed to many different tickers at the same time. You can also supply `idstring` as an identifying string if you ever want to tick the callable at the same interval but with different arguments (args/kwargs are not used for identifying the ticker). There is also `persistent=False` if you don't want to make a ticker that don't survive a reload. If either or both `idstring` or `persistent` has been changed from their defaults, they must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker to remove. The TickerHandler's functionality can be overloaded by modifying the Ticker class and then changing TickerPool and TickerHandler to use the custom classes ```python class MyTicker(Ticker): # [doing custom stuff] class MyTickerPool(TickerPool): ticker_class = MyTicker class MyTickerHandler(TickerHandler): ticker_pool_class = MyTickerPool ``` If one wants to duplicate TICKER_HANDLER's auto-saving feature in a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to call the handler's `save()` and `restore()` methods when the server reboots. """ import inspect from builtins import object from twisted.internet.defer import inlineCallbacks from django.core.exceptions import ObjectDoesNotExist from evennia.scripts.scripts import ExtendedLoopingCall from evennia.server.models import ServerConfig from evennia.utils.logger import log_trace, log_err from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj from evennia.utils import variable_from_module _GA = object.__getattribute__ _SA = object.__setattr__ _ERROR_ADD_TICKER = \ """TickerHandler: Tried to add an invalid ticker: {storekey} Ticker was not added.""" class Ticker(object): """ Represents a repeatedly running task that calls hooks repeatedly. Overload `_callback` to change the way it operates. """ @inlineCallbacks def _callback(self): """ This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The _hook_key, which is passed down through the handler via kwargs is used here to identify which hook method to call. 
""" self._to_add = [] self._to_remove = [] self._is_ticking = True for store_key, (args, kwargs) in self.subscriptions.iteritems(): callback = yield kwargs.pop("_callback", "at_tick") obj = yield kwargs.pop("_obj", None) try: if callable(callback): # call directly yield callback(*args, **kwargs) continue # try object method if not obj or not obj.pk: # object was deleted between calls self._to_remove.append(store_key) continue else: yield _GA(obj, callback)(*args, **kwargs) except ObjectDoesNotExist: log_trace("Removing ticker.") self._to_remove.append(store_key) except Exception: log_trace() finally: # make sure to re-store kwargs["_callback"] = callback kwargs["_obj"] = obj # cleanup - we do this here to avoid changing the subscription dict while it loops self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for store_key, (args, kwargs) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = [] def __init__(self, interval): """ Set up the ticker Args: interval (int): The stepping interval. """ self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] # set up a twisted asynchronous repeat call self.task = ExtendedLoopingCall(self._callback) def validate(self, start_delay=None): """ Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to way before starting. """ subs = self.subscriptions if self.task.running: if not subs: self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay) def add(self, store_key, *args, **kwargs): """ Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`. """ if self._is_ticking: # protects the subscription dict from # updating while it is looping self._to_start.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop("_start_delay", None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay) def remove(self, store_key): """ Unsubscribe object from this ticker Args: store_key (str): Unique store key. """ if self._is_ticking: # this protects the subscription dict from # updating while it is looping self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate() def stop(self): """ Kill the Task, regardless of subscriptions. """ self.subscriptions = {} self.validate() class TickerPool(object): """ This maintains a pool of `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling subscribed objects at given times. """ ticker_class = Ticker def __init__(self): """ Initialize the pool. """ self.tickers = {} def add(self, store_key, *args, **kwargs): """ Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method. """ _, _, _, interval, _, _ = store_key if not interval: log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if interval not in self.tickers: self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs) def remove(self, store_key): """ Remove subscription from pool. 
Args: store_key (tuple): Unique storage key to remove. """ _, _, _, interval, _, _ = store_key if interval in self.tickers: self.tickers[interval].remove(store_key) if not self.tickers[interval]: del self.tickers[interval] def stop(self, interval=None): """ Stop all tickers in the pool. This is done at server reload since restoring the pool will automatically re-populate it. Args: interval (int, optional): Only stop tickers with this interval. """ if interval and interval in self.tickers: self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop() class TickerHandler(object): """ The Tickerhandler maintains a pool of tasks for subscribing objects to various tick rates. The pool maintains creation instructions and re-applies them at a server restart. """ ticker_pool_class = TickerPool def __init__(self, save_name="ticker_storage"): """ Initialize handler. Args: save_name (str, optional): The name of the ServerConfig instance used to store the handler state persistently. """ self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class() def _get_callback(self, callback): """ Analyze callback and determine its constituents. Args: callback (function or method): This is either a stand-alone function or a class method on a typeclassed entity (that is, an entity that can be saved to the database). Returns: ret (tuple): This is a tuple of the form `(obj, path, callfunc)`, where `obj` is the database object the callback is defined on if it is a method (otherwise `None`), `path` is the python-path to the stand-alone function (`None` if a method). The `callfunc` is either the name of the method to call or the callable function object itself. """ outobj, outpath, outcallfunc = None, None, None if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = "%s.%s" % (callback.__module__, callback.func_name) outcallfunc = callback else: raise TypeError("%s is not a callable function or method." % callback) return outobj, outpath, outcallfunc def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True): """ Tries to create a store_key for the object. Args: obj (Object, tuple or None): Subscribing object if any. If a tuple, this is a packed_obj tuple from dbserialize. path (str or None): Python-path to callable, if any. interval (int): Ticker interval. callfunc (callable or str): This is either the callable function or the name of the method to call. Note that the callable is never stored in the key; that is uniquely identified with the python-path. idstring (str, optional): Additional separator between different subscription types. persistent (bool, optional): If this ticker should survive a system shutdown or not. Returns: store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval, idstring, persistent)` that uniquely identifies the ticker. Here, `packed_obj` is the unique string representation of the object or `None`. The `methodname` is the string name of the method on `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is the Python-path to a non-method callable, or `None`. Finally, `interval`, `idstring` and `persistent` are an integer, a string and a bool, respectively.
""" interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None outpath = path if path and isinstance(path, basestring) else None return (packed_obj, methodname, outpath, interval, idstring, persistent) def save(self): """ Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point. """ if self.ticker_storage: # get the current times so the tickers can be restarted with a delay later start_delays = dict((interval, ticker.task.next_call_time()) for interval, ticker in self.ticker_pool.tickers.items()) # remove any subscriptions that lost its object in the interim to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items() if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj store_key[2])} # a path given # update the timers for the tickers for store_key, (args, kwargs) in to_save.items(): interval = store_key[1] # this is a mutable, so it's updated in-place in ticker_storage kwargs["_start_delay"] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: # make sure we have nothing lingering in the database ServerConfig.objects.conf(key=self.save_name, delete=True) def restore(self, server_reload=True): """ Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed. """ # load stored command instructions and use them to re-initialize handler restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: # the dbunserialize will convert all serialized dbobjs to real objects restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for store_key, (args, kwargs) in restored_tickers.iteritems(): try: # at this point obj is the actual object (or None) due to how # the dbunserialize works obj, callfunc, path, interval, idstring, persistent = store_key if not persistent and not server_reload: # this ticker will not be restarted continue if isinstance(callfunc, basestring) and not obj: # methods must have an existing object continue # we must rebuild the store_key here since obj must not be # stored as the object itself for the store_key to be hashable. 
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if obj and callfunc: kwargs["_callback"] = callfunc kwargs["_obj"] = obj elif path: modname, varname = path.rsplit(".", 1) callback = variable_from_module(modname, varname) kwargs["_callback"] = callback kwargs["_obj"] = None else: # Neither object nor path - discard this ticker log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue except Exception: # this suggests a malformed save or missing objects log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key)) continue # if we get here we should create a new ticker self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs): """ Add subscription to tickerhandler Args: interval (int, optional): Interval in seconds between calling `callback(*args, **kwargs)` callback (callable function or method, optional): This should either be a stand-alone function or a method on a typeclassed entity (that is, one that can be saved to the database). idstring (str, optional): Identifier for separating this ticker-subscription from others with the same interval. Allows for managing multiple calls with the same time interval and callback. persistent (bool, optional): A ticker always survives a server reload; if this is unset, the ticker will however be deleted by a server shutdown. args, kwargs (optional): These will be passed into the callback every time it is called. Notes: The callback will be identified by type and stored either as a combination of serialized database object + methodname or as a python-path to the module + funcname. These strings will be combined with `interval` and `idstring` to define a unique storage key for saving. These must thus all be supplied when wanting to modify/remove the ticker later. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.add has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) kwargs["_obj"] = obj kwargs["_callback"] = callfunc # either method-name or callable self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) self.save() def remove(self, interval=60, callback=None, idstring="", persistent=True): """ Remove a ticker subscription. The subscription is identified by the same interval, callback, idstring and persistent flag it was added with. Args: interval (int, optional): Interval of ticker to remove. callback (callable function or method): Either a function or the method of a typeclassed object. idstring (str, optional): Identifier of the ticker to remove. persistent (bool, optional): Whether the ticker was added as persistent; this is part of its identification. """ if isinstance(callback, int): raise RuntimeError("TICKER_HANDLER.remove has changed: " "the interval is now the first argument, callback the second.") obj, path, callfunc = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) to_remove = self.ticker_storage.pop(store_key, None) if to_remove: self.ticker_pool.remove(store_key) self.save() def clear(self, interval=None): """ Stop/remove tickers from handler. Args: interval (int): Only stop tickers with this interval. Notes: This is the only supported way to kill tickers related to non-db objects.
""" self.ticker_pool.stop(interval) if interval: self.ticker_storage = dict((store_key, store_key) for store_key in self.ticker_storage if store_key[1] != interval) else: self.ticker_storage = {} self.save() def all(self, interval=None): """ Get all subscriptions. Args: interval (int): Limit match to tickers with this interval. Returns: tickers (list): If `interval` was given, this is a list of tickers using that interval. tickerpool_layout (dict): If `interval` was *not* given, this is a dict {interval1: [ticker1, ticker2, ...], ...} """ if interval is None: # return dict of all, ordered by interval return dict((interval, ticker.subscriptions) for interval, ticker in self.ticker_pool.tickers.iteritems()) else: # get individual interval ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions} def all_display(self): """ Get all tickers on an easily displayable form. Returns: tickers (dict): A list of all storekeys """ store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent)) return store_keys # main tickerhandler TICKER_HANDLER = TickerHandler()
size_num_grads
Count the total size and number of all gradient arrays of a given link. Args: link (chainer.link.Link): Target link object. Returns: tuple: The total number of elements (size) and the count of parameter arrays (num).
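A small worked example of what this helper returns, assuming `size_num_grads` is imported from this updater module (the layer shapes are illustrative):

```python
import chainer.links as L
from chainer.training.updaters.multiprocess_parallel_updater import size_num_grads

link = L.Linear(3, 2)  # W is 2x3 (6 elements), b has 2 elements
size, num = size_num_grads(link)
# size == 8 (total elements across all parameters), num == 2 (parameter arrays)
```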
import multiprocessing import warnings import six from chainer.backends import cuda from chainer.dataset import convert from chainer import reporter from chainer.training.updaters import standard_updater try: from cupy.cuda import nccl _available = True except Exception: _available = False import numpy class _Worker(multiprocessing.Process): def __init__(self, proc_id, pipe, master): super(_Worker, self).__init__() self.proc_id = proc_id self.pipe = pipe self.converter = master.converter self.model = master._master self.device = master._devices[proc_id] self.iterator = master._mpu_iterators[proc_id] self.n_devices = len(master._devices) def setup(self): _, comm_id = self.pipe.recv() self.comm = nccl.NcclCommunicator(self.n_devices, comm_id, self.proc_id) self.model.to_gpu(self.device) self.reporter = reporter.Reporter() self.reporter.add_observer('main', self.model) self.reporter.add_observers('main', self.model.namedlinks(skipself=True)) def run(self): dev = cuda.Device(self.device) dev.use() self.setup() while True: job, data = self.pipe.recv() if job == 'finalize': dev.synchronize() break if job == 'update': # For reducing memory self.model.cleargrads() batch = self.converter(self.iterator.next(), self.device) with self.reporter.scope({}): # pass dummy observation loss = _calc_loss(self.model, batch) self.model.cleargrads() loss.backward() del loss gg = gather_grads(self.model) nccl_data_type = _get_nccl_data_type(gg.dtype) null_stream = cuda.Stream.null self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) del gg self.model.cleargrads() gp = gather_params(self.model) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) scatter_params(self.model, gp) del gp class MultiprocessParallelUpdater(standard_updater.StandardUpdater): """Implementation of a multiprocess parallel GPU Updater. This is an implementation of :class:`Updater` that uses multiple GPUs with multi-process data parallelism. It uses Nvidia NCCL for communication between multiple GPUs. It behaves similarly to :class:`~chainer.training.updaters.StandardUpdater`. The update routine is modified to support data-parallel computation on multiple GPUs in one machine. It is based on synchronous parallel SGD: it parallelizes the gradient computation over a mini-batch, and updates the parameters only in the main device. It does not transfer the values collected by :class:`Reporter` in the sub devices to the main device. So you can only see the reported values in the main device. Args: iterators: List of dataset iterator for the training dataset. The number of the iterators must be same to the number of GPUs you use. optimizer: Optimizer to update parameters. The model should be attached to the optimizer. converter: Converter function to build input arrays. Each batch extracted by the iterator is split equally between the devices and then passed with corresponding ``device`` option to this function. :func:`~chainer.dataset.concat_examples` is used by default. devices: Dictionary or list of devices to which the training data is sent. The master device will be the first one in the list or the value attached to the key ``'main'``. auto_new_epoch (bool): If ``True``, :meth:`~chainer.Optimizer.new_epoch` of the main optimizer is automatically called when the ``is_new_epoch`` attribute of the main iterator is ``True``. 
""" def __init__(self, iterators, optimizer, converter=convert.concat_examples, devices=None, auto_new_epoch=True): if not MultiprocessParallelUpdater.available(): raise Exception( 'NCCL is not enabled. MultiprocessParallelUpdater ' 'requires NCCL.\n' 'Please reinstall CuPy after you install NCCL.\n' '(see https://docs-cupy.chainer.org/en/latest/install.html)') try: cuda.cupy.cuda.driver.ctxGetCurrent() _cuda_initialized = True except cuda.cupy.cuda.driver.CUDADriverError: # The context is not initialized, it will be fine. _cuda_initialized = False if _cuda_initialized: raise RuntimeError( 'The CUDA context has been already initialized. ' 'MultiprocessParallelUpdater assumes the context is ' 'uninitialized. Please do not call CUDA API before ' 'MultiprocessParallelUpdater creates processes.') assert len(iterators) == len(devices) for iterator in iterators[1:]: assert len(iterator.dataset) == len(iterators[0].dataset) # Correct optimizer parameters for new minibatch size optim = optimizer.__class__.__name__ if optim in ('Adam', 'AdaGrad', 'RMSprop'): optimizer.eps *= len(devices) warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif optim in ('RMSpropGraves', 'AdaDelta'): optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif hasattr(optimizer, 'lr'): optimizer.lr /= len(devices) warnings.warn('optimizer.lr is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.lr)) super(MultiprocessParallelUpdater, self).__init__( iterator=iterators[0], optimizer=optimizer, converter=converter, auto_new_epoch=auto_new_epoch, ) if isinstance(devices, dict): devices = devices.copy() main = devices.pop('main') devices = list(six.itervalues(devices)) devices = [main] + devices elif isinstance(devices, (list, tuple)): devices = list(devices) else: raise ValueError( 'devices argument should be either dict, list or tuple,' ' but {} was given.'.format(type(devices))) if devices is None or any(device is None for device in devices): raise ValueError('must specify GPU devices') self._master = optimizer.target self._devices = devices self._mpu_iterators = iterators self._initialized = False self._pipes = [] self._workers = [] self.comm = None @staticmethod def available(): return _available def _send_message(self, message): for pipe in self._pipes: pipe.send(message) def setup_workers(self): if self._initialized: return self._initialized = True self._master.cleargrads() for i in six.moves.range(1, len(self._devices)): pipe, worker_end = multiprocessing.Pipe() worker = _Worker(i, worker_end, self) worker.start() self._workers.append(worker) self._pipes.append(pipe) with cuda.Device(self._devices[0]): self._master.to_gpu(self._devices[0]) if len(self._devices) > 1: comm_id = nccl.get_unique_id() self._send_message(('set comm_id', comm_id)) self.comm = nccl.NcclCommunicator(len(self._devices), comm_id, 0) def update_core(self): self.setup_workers() self._send_message(('update', None)) with cuda.Device(self._devices[0]): # For reducing memory self._master.cleargrads() optimizer = self.get_optimizer('main') iterator = self.get_iterator('main') batch = iterator.next() batch = self.converter(batch, self._devices[0]) loss = _calc_loss(self._master, batch) self._master.cleargrads() loss.backward() # NCCL: reduce grads null_stream = cuda.Stream.null if self.comm is not None: gg = 
gather_grads(self._master) nccl_data_type = _get_nccl_data_type(gg.dtype) self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) scatter_grads(self._master, gg) del gg optimizer.update() if self.comm is not None: gp = gather_params(self._master) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) if self.auto_new_epoch and iterator.is_new_epoch: optimizer.new_epoch(auto=True) def finalize(self): self._send_message(('finalize', None)) for worker in self._workers: worker.join() def _calc_loss(model, in_arrays): if isinstance(in_arrays, tuple): return model(*in_arrays) elif isinstance(in_arrays, dict): return model(**in_arrays) else: return model(in_arrays) # MASKED: size_num_grads function (lines 276-289) def _memcpy_gather(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info', 'raw float32 dst', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_dst = i; int i_src = i; if (id > 0) i_src -= info[id]; dst[i_dst] = 0; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *src = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = src[i_src]; } else { // fp16 float16 *src = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float>(src[i_src]); } } id_pre = id; ''', '_memcpy_gather', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _gather(link, target): size, num = size_num_grads(link) ptrs = numpy.empty(num, dtype=numpy.uint64) dtypes = numpy.empty(num, dtype=numpy.int8) info = numpy.empty(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is not None: ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_gather()(ptrs, dtypes, info, size=size) def gather_grads(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('gather_grads works only on GPU.') return _gather(link, 'grad') def gather_params(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. 
Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('Link.gather_params works only on GPU.') return _gather(link, 'data') def _memcpy_scatter(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info, raw float32 array', '', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_src = i; int i_dst = i; if (id > 0) i_dst -= info[id]; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *dst = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = array[i_src]; } else { // fp16 float16 *dst = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float16>(array[i_src]); } } id_pre = id; ''', '_memcpy_scatter', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _scatter(link, array, target): size, num = size_num_grads(link) ptrs = numpy.zeros(num, dtype=numpy.uint64) dtypes = numpy.zeros(num, dtype=numpy.int8) info = numpy.zeros(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is None: d = cuda.cupy.zeros(param.shape, dtype=param.dtype) setattr(param, target, d) ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 if i != num: raise() info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_scatter()(ptrs, dtypes, info, array, size=size) def scatter_grads(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_grads() """ return _scatter(link, array, 'grad') def scatter_params(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_params() """ return _scatter(link, array, 'data') def _get_nccl_data_type(dtype): """Get data type for NCCL""" if dtype == numpy.float32: nccl_data_type = nccl.NCCL_FLOAT elif dtype == numpy.float16: nccl_data_type = nccl.NCCL_HALF elif dtype == numpy.float64: nccl_data_type = nccl.NCCL_DOUBLE else: raise RuntimeError('Unexpected data type:{}'.format(dtype)) return nccl_data_type
def size_num_grads(link): """Count the total size and number of all gradient arrays of a given link. Args: link (chainer.link.Link): Target link object. Returns: tuple: The total number of elements (size) and the count of parameter arrays (num). """ size = 0 num = 0 for param in link.params(): if param.size == 0: continue size += param.size num += 1 return size, num
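For intuition, here is a CPU-only sketch of what `_gather(link, 'grad')` computes with its fused kernel: gradients are concatenated in sorted-name order into one flat fp32 buffer, with missing gradients read as zeros (mirroring the NULL-pointer branch of `_memcpy_gather`). This is for illustration only, not the updater's code path:

```python
import numpy
from chainer.backends import cuda

def gather_grads_cpu(link):
    # conceptual CPU equivalent of _gather(link, 'grad')
    parts = []
    for _, param in sorted(link.namedparams()):
        if param.size == 0:
            continue
        if param.grad is None:
            # segments with a NULL device pointer are read as zeros
            parts.append(numpy.zeros(param.size, dtype=numpy.float32))
        else:
            parts.append(cuda.to_cpu(param.grad).ravel().astype(numpy.float32))
    return numpy.concatenate(parts) if parts else numpy.zeros(0, dtype=numpy.float32)
```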
276
289
import multiprocessing import warnings import six from chainer.backends import cuda from chainer.dataset import convert from chainer import reporter from chainer.training.updaters import standard_updater try: from cupy.cuda import nccl _available = True except Exception: _available = False import numpy class _Worker(multiprocessing.Process): def __init__(self, proc_id, pipe, master): super(_Worker, self).__init__() self.proc_id = proc_id self.pipe = pipe self.converter = master.converter self.model = master._master self.device = master._devices[proc_id] self.iterator = master._mpu_iterators[proc_id] self.n_devices = len(master._devices) def setup(self): _, comm_id = self.pipe.recv() self.comm = nccl.NcclCommunicator(self.n_devices, comm_id, self.proc_id) self.model.to_gpu(self.device) self.reporter = reporter.Reporter() self.reporter.add_observer('main', self.model) self.reporter.add_observers('main', self.model.namedlinks(skipself=True)) def run(self): dev = cuda.Device(self.device) dev.use() self.setup() while True: job, data = self.pipe.recv() if job == 'finalize': dev.synchronize() break if job == 'update': # For reducing memory self.model.cleargrads() batch = self.converter(self.iterator.next(), self.device) with self.reporter.scope({}): # pass dummy observation loss = _calc_loss(self.model, batch) self.model.cleargrads() loss.backward() del loss gg = gather_grads(self.model) nccl_data_type = _get_nccl_data_type(gg.dtype) null_stream = cuda.Stream.null self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) del gg self.model.cleargrads() gp = gather_params(self.model) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) scatter_params(self.model, gp) del gp class MultiprocessParallelUpdater(standard_updater.StandardUpdater): """Implementation of a multiprocess parallel GPU Updater. This is an implementation of :class:`Updater` that uses multiple GPUs with multi-process data parallelism. It uses Nvidia NCCL for communication between multiple GPUs. It behaves similarly to :class:`~chainer.training.updaters.StandardUpdater`. The update routine is modified to support data-parallel computation on multiple GPUs in one machine. It is based on synchronous parallel SGD: it parallelizes the gradient computation over a mini-batch, and updates the parameters only in the main device. It does not transfer the values collected by :class:`Reporter` in the sub devices to the main device. So you can only see the reported values in the main device. Args: iterators: List of dataset iterator for the training dataset. The number of the iterators must be same to the number of GPUs you use. optimizer: Optimizer to update parameters. The model should be attached to the optimizer. converter: Converter function to build input arrays. Each batch extracted by the iterator is split equally between the devices and then passed with corresponding ``device`` option to this function. :func:`~chainer.dataset.concat_examples` is used by default. devices: Dictionary or list of devices to which the training data is sent. The master device will be the first one in the list or the value attached to the key ``'main'``. auto_new_epoch (bool): If ``True``, :meth:`~chainer.Optimizer.new_epoch` of the main optimizer is automatically called when the ``is_new_epoch`` attribute of the main iterator is ``True``. 
""" def __init__(self, iterators, optimizer, converter=convert.concat_examples, devices=None, auto_new_epoch=True): if not MultiprocessParallelUpdater.available(): raise Exception( 'NCCL is not enabled. MultiprocessParallelUpdater ' 'requires NCCL.\n' 'Please reinstall CuPy after you install NCCL.\n' '(see https://docs-cupy.chainer.org/en/latest/install.html)') try: cuda.cupy.cuda.driver.ctxGetCurrent() _cuda_initialized = True except cuda.cupy.cuda.driver.CUDADriverError: # The context is not initialized, it will be fine. _cuda_initialized = False if _cuda_initialized: raise RuntimeError( 'The CUDA context has been already initialized. ' 'MultiprocessParallelUpdater assumes the context is ' 'uninitialized. Please do not call CUDA API before ' 'MultiprocessParallelUpdater creates processes.') assert len(iterators) == len(devices) for iterator in iterators[1:]: assert len(iterator.dataset) == len(iterators[0].dataset) # Correct optimizer parameters for new minibatch size optim = optimizer.__class__.__name__ if optim in ('Adam', 'AdaGrad', 'RMSprop'): optimizer.eps *= len(devices) warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif optim in ('RMSpropGraves', 'AdaDelta'): optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif hasattr(optimizer, 'lr'): optimizer.lr /= len(devices) warnings.warn('optimizer.lr is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.lr)) super(MultiprocessParallelUpdater, self).__init__( iterator=iterators[0], optimizer=optimizer, converter=converter, auto_new_epoch=auto_new_epoch, ) if isinstance(devices, dict): devices = devices.copy() main = devices.pop('main') devices = list(six.itervalues(devices)) devices = [main] + devices elif isinstance(devices, (list, tuple)): devices = list(devices) else: raise ValueError( 'devices argument should be either dict, list or tuple,' ' but {} was given.'.format(type(devices))) if devices is None or any(device is None for device in devices): raise ValueError('must specify GPU devices') self._master = optimizer.target self._devices = devices self._mpu_iterators = iterators self._initialized = False self._pipes = [] self._workers = [] self.comm = None @staticmethod def available(): return _available def _send_message(self, message): for pipe in self._pipes: pipe.send(message) def setup_workers(self): if self._initialized: return self._initialized = True self._master.cleargrads() for i in six.moves.range(1, len(self._devices)): pipe, worker_end = multiprocessing.Pipe() worker = _Worker(i, worker_end, self) worker.start() self._workers.append(worker) self._pipes.append(pipe) with cuda.Device(self._devices[0]): self._master.to_gpu(self._devices[0]) if len(self._devices) > 1: comm_id = nccl.get_unique_id() self._send_message(('set comm_id', comm_id)) self.comm = nccl.NcclCommunicator(len(self._devices), comm_id, 0) def update_core(self): self.setup_workers() self._send_message(('update', None)) with cuda.Device(self._devices[0]): # For reducing memory self._master.cleargrads() optimizer = self.get_optimizer('main') iterator = self.get_iterator('main') batch = iterator.next() batch = self.converter(batch, self._devices[0]) loss = _calc_loss(self._master, batch) self._master.cleargrads() loss.backward() # NCCL: reduce grads null_stream = cuda.Stream.null if self.comm is not None: gg = 
gather_grads(self._master) nccl_data_type = _get_nccl_data_type(gg.dtype) self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) scatter_grads(self._master, gg) del gg optimizer.update() if self.comm is not None: gp = gather_params(self._master) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) if self.auto_new_epoch and iterator.is_new_epoch: optimizer.new_epoch(auto=True) def finalize(self): self._send_message(('finalize', None)) for worker in self._workers: worker.join() def _calc_loss(model, in_arrays): if isinstance(in_arrays, tuple): return model(*in_arrays) elif isinstance(in_arrays, dict): return model(**in_arrays) else: return model(in_arrays) def size_num_grads(link): """Count total size of all gradient arrays of a given link Args: link (chainer.link.Link): Target link object. """ size = 0 num = 0 for param in link.params(): if param.size == 0: continue size += param.size num += 1 return size, num def _memcpy_gather(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info', 'raw float32 dst', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_dst = i; int i_src = i; if (id > 0) i_src -= info[id]; dst[i_dst] = 0; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *src = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = src[i_src]; } else { // fp16 float16 *src = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float>(src[i_src]); } } id_pre = id; ''', '_memcpy_gather', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _gather(link, target): size, num = size_num_grads(link) ptrs = numpy.empty(num, dtype=numpy.uint64) dtypes = numpy.empty(num, dtype=numpy.int8) info = numpy.empty(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is not None: ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_gather()(ptrs, dtypes, info, size=size) def gather_grads(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('gather_grads works only on GPU.') return _gather(link, 'grad') def gather_params(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. 
Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('Link.gather_params works only on GPU.') return _gather(link, 'data') def _memcpy_scatter(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info, raw float32 array', '', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_src = i; int i_dst = i; if (id > 0) i_dst -= info[id]; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *dst = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = array[i_src]; } else { // fp16 float16 *dst = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float16>(array[i_src]); } } id_pre = id; ''', '_memcpy_scatter', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _scatter(link, array, target): size, num = size_num_grads(link) ptrs = numpy.zeros(num, dtype=numpy.uint64) dtypes = numpy.zeros(num, dtype=numpy.int8) info = numpy.zeros(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is None: d = cuda.cupy.zeros(param.shape, dtype=param.dtype) setattr(param, target, d) ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 if i != num: raise() info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_scatter()(ptrs, dtypes, info, array, size=size) def scatter_grads(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_grads() """ return _scatter(link, array, 'grad') def scatter_params(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_params() """ return _scatter(link, array, 'data') def _get_nccl_data_type(dtype): """Get data type for NCCL""" if dtype == numpy.float32: nccl_data_type = nccl.NCCL_FLOAT elif dtype == numpy.float16: nccl_data_type = nccl.NCCL_HALF elif dtype == numpy.float64: nccl_data_type = nccl.NCCL_DOUBLE else: raise RuntimeError('Unexpected data type:{}'.format(dtype)) return nccl_data_type
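A note on the hyperparameter rescaling that MultiprocessParallelUpdater.__init__ performs in the cells above: because the effective minibatch grows with the number of devices, the constructor multiplies eps by len(devices) for Adam/AdaGrad/RMSprop, by len(devices)**2 for RMSpropGraves/AdaDelta, and divides lr by len(devices) otherwise. A minimal sketch of that rule, factored into a standalone helper (the name rescale_for_data_parallel is ours, not part of the source):

import warnings

def rescale_for_data_parallel(optimizer, n_devices):
    # Mirrors the scaling done in MultiprocessParallelUpdater.__init__
    # for an n_devices-times larger effective batch size.
    name = optimizer.__class__.__name__
    if name in ('Adam', 'AdaGrad', 'RMSprop'):
        optimizer.eps *= n_devices
    elif name in ('RMSpropGraves', 'AdaDelta'):
        # The source applies a squared factor here and notes it is
        # "not quite right" for AdaDelta.
        optimizer.eps *= n_devices ** 2
    elif hasattr(optimizer, 'lr'):
        optimizer.lr /= n_devices
    else:
        return  # nothing to rescale
    warnings.warn('optimizer hyperparameters rescaled for {} devices'
                  .format(n_devices))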
gather_grads
Put together all gradient arrays and make a single array

Args:
    link (chainer.link.Link): Target link object.
Return:
    cupy.ndarray
import multiprocessing import warnings import six from chainer.backends import cuda from chainer.dataset import convert from chainer import reporter from chainer.training.updaters import standard_updater try: from cupy.cuda import nccl _available = True except Exception: _available = False import numpy class _Worker(multiprocessing.Process): def __init__(self, proc_id, pipe, master): super(_Worker, self).__init__() self.proc_id = proc_id self.pipe = pipe self.converter = master.converter self.model = master._master self.device = master._devices[proc_id] self.iterator = master._mpu_iterators[proc_id] self.n_devices = len(master._devices) def setup(self): _, comm_id = self.pipe.recv() self.comm = nccl.NcclCommunicator(self.n_devices, comm_id, self.proc_id) self.model.to_gpu(self.device) self.reporter = reporter.Reporter() self.reporter.add_observer('main', self.model) self.reporter.add_observers('main', self.model.namedlinks(skipself=True)) def run(self): dev = cuda.Device(self.device) dev.use() self.setup() while True: job, data = self.pipe.recv() if job == 'finalize': dev.synchronize() break if job == 'update': # For reducing memory self.model.cleargrads() batch = self.converter(self.iterator.next(), self.device) with self.reporter.scope({}): # pass dummy observation loss = _calc_loss(self.model, batch) self.model.cleargrads() loss.backward() del loss gg = gather_grads(self.model) nccl_data_type = _get_nccl_data_type(gg.dtype) null_stream = cuda.Stream.null self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) del gg self.model.cleargrads() gp = gather_params(self.model) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) scatter_params(self.model, gp) del gp class MultiprocessParallelUpdater(standard_updater.StandardUpdater): """Implementation of a multiprocess parallel GPU Updater. This is an implementation of :class:`Updater` that uses multiple GPUs with multi-process data parallelism. It uses Nvidia NCCL for communication between multiple GPUs. It behaves similarly to :class:`~chainer.training.updaters.StandardUpdater`. The update routine is modified to support data-parallel computation on multiple GPUs in one machine. It is based on synchronous parallel SGD: it parallelizes the gradient computation over a mini-batch, and updates the parameters only in the main device. It does not transfer the values collected by :class:`Reporter` in the sub devices to the main device. So you can only see the reported values in the main device. Args: iterators: List of dataset iterator for the training dataset. The number of the iterators must be same to the number of GPUs you use. optimizer: Optimizer to update parameters. The model should be attached to the optimizer. converter: Converter function to build input arrays. Each batch extracted by the iterator is split equally between the devices and then passed with corresponding ``device`` option to this function. :func:`~chainer.dataset.concat_examples` is used by default. devices: Dictionary or list of devices to which the training data is sent. The master device will be the first one in the list or the value attached to the key ``'main'``. auto_new_epoch (bool): If ``True``, :meth:`~chainer.Optimizer.new_epoch` of the main optimizer is automatically called when the ``is_new_epoch`` attribute of the main iterator is ``True``. 
""" def __init__(self, iterators, optimizer, converter=convert.concat_examples, devices=None, auto_new_epoch=True): if not MultiprocessParallelUpdater.available(): raise Exception( 'NCCL is not enabled. MultiprocessParallelUpdater ' 'requires NCCL.\n' 'Please reinstall CuPy after you install NCCL.\n' '(see https://docs-cupy.chainer.org/en/latest/install.html)') try: cuda.cupy.cuda.driver.ctxGetCurrent() _cuda_initialized = True except cuda.cupy.cuda.driver.CUDADriverError: # The context is not initialized, it will be fine. _cuda_initialized = False if _cuda_initialized: raise RuntimeError( 'The CUDA context has been already initialized. ' 'MultiprocessParallelUpdater assumes the context is ' 'uninitialized. Please do not call CUDA API before ' 'MultiprocessParallelUpdater creates processes.') assert len(iterators) == len(devices) for iterator in iterators[1:]: assert len(iterator.dataset) == len(iterators[0].dataset) # Correct optimizer parameters for new minibatch size optim = optimizer.__class__.__name__ if optim in ('Adam', 'AdaGrad', 'RMSprop'): optimizer.eps *= len(devices) warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif optim in ('RMSpropGraves', 'AdaDelta'): optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif hasattr(optimizer, 'lr'): optimizer.lr /= len(devices) warnings.warn('optimizer.lr is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.lr)) super(MultiprocessParallelUpdater, self).__init__( iterator=iterators[0], optimizer=optimizer, converter=converter, auto_new_epoch=auto_new_epoch, ) if isinstance(devices, dict): devices = devices.copy() main = devices.pop('main') devices = list(six.itervalues(devices)) devices = [main] + devices elif isinstance(devices, (list, tuple)): devices = list(devices) else: raise ValueError( 'devices argument should be either dict, list or tuple,' ' but {} was given.'.format(type(devices))) if devices is None or any(device is None for device in devices): raise ValueError('must specify GPU devices') self._master = optimizer.target self._devices = devices self._mpu_iterators = iterators self._initialized = False self._pipes = [] self._workers = [] self.comm = None @staticmethod def available(): return _available def _send_message(self, message): for pipe in self._pipes: pipe.send(message) def setup_workers(self): if self._initialized: return self._initialized = True self._master.cleargrads() for i in six.moves.range(1, len(self._devices)): pipe, worker_end = multiprocessing.Pipe() worker = _Worker(i, worker_end, self) worker.start() self._workers.append(worker) self._pipes.append(pipe) with cuda.Device(self._devices[0]): self._master.to_gpu(self._devices[0]) if len(self._devices) > 1: comm_id = nccl.get_unique_id() self._send_message(('set comm_id', comm_id)) self.comm = nccl.NcclCommunicator(len(self._devices), comm_id, 0) def update_core(self): self.setup_workers() self._send_message(('update', None)) with cuda.Device(self._devices[0]): # For reducing memory self._master.cleargrads() optimizer = self.get_optimizer('main') iterator = self.get_iterator('main') batch = iterator.next() batch = self.converter(batch, self._devices[0]) loss = _calc_loss(self._master, batch) self._master.cleargrads() loss.backward() # NCCL: reduce grads null_stream = cuda.Stream.null if self.comm is not None: gg = 
gather_grads(self._master) nccl_data_type = _get_nccl_data_type(gg.dtype) self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) scatter_grads(self._master, gg) del gg optimizer.update() if self.comm is not None: gp = gather_params(self._master) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) if self.auto_new_epoch and iterator.is_new_epoch: optimizer.new_epoch(auto=True) def finalize(self): self._send_message(('finalize', None)) for worker in self._workers: worker.join() def _calc_loss(model, in_arrays): if isinstance(in_arrays, tuple): return model(*in_arrays) elif isinstance(in_arrays, dict): return model(**in_arrays) else: return model(in_arrays) def size_num_grads(link): """Count total size of all gradient arrays of a given link Args: link (chainer.link.Link): Target link object. """ size = 0 num = 0 for param in link.params(): if param.size == 0: continue size += param.size num += 1 return size, num def _memcpy_gather(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info', 'raw float32 dst', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_dst = i; int i_src = i; if (id > 0) i_src -= info[id]; dst[i_dst] = 0; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *src = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = src[i_src]; } else { // fp16 float16 *src = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float>(src[i_src]); } } id_pre = id; ''', '_memcpy_gather', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _gather(link, target): size, num = size_num_grads(link) ptrs = numpy.empty(num, dtype=numpy.uint64) dtypes = numpy.empty(num, dtype=numpy.int8) info = numpy.empty(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is not None: ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_gather()(ptrs, dtypes, info, size=size) # MASKED: gather_grads function (lines 359-369) def gather_params(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. 
Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('Link.gather_params works only on GPU.') return _gather(link, 'data') def _memcpy_scatter(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info, raw float32 array', '', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_src = i; int i_dst = i; if (id > 0) i_dst -= info[id]; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *dst = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = array[i_src]; } else { // fp16 float16 *dst = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float16>(array[i_src]); } } id_pre = id; ''', '_memcpy_scatter', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _scatter(link, array, target): size, num = size_num_grads(link) ptrs = numpy.zeros(num, dtype=numpy.uint64) dtypes = numpy.zeros(num, dtype=numpy.int8) info = numpy.zeros(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is None: d = cuda.cupy.zeros(param.shape, dtype=param.dtype) setattr(param, target, d) ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 if i != num: raise() info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_scatter()(ptrs, dtypes, info, array, size=size) def scatter_grads(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_grads() """ return _scatter(link, array, 'grad') def scatter_params(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_params() """ return _scatter(link, array, 'data') def _get_nccl_data_type(dtype): """Get data type for NCCL""" if dtype == numpy.float32: nccl_data_type = nccl.NCCL_FLOAT elif dtype == numpy.float16: nccl_data_type = nccl.NCCL_HALF elif dtype == numpy.float64: nccl_data_type = nccl.NCCL_DOUBLE else: raise RuntimeError('Unexpected data type:{}'.format(dtype)) return nccl_data_type
def gather_grads(link):
    """Put together all gradient arrays and make a single array

    Args:
        link (chainer.link.Link): Target link object.
    Return:
        cupy.ndarray
    """
    if link.xp is numpy:
        raise RuntimeError('gather_grads works only on GPU.')
    return _gather(link, 'grad')
359
369
import multiprocessing import warnings import six from chainer.backends import cuda from chainer.dataset import convert from chainer import reporter from chainer.training.updaters import standard_updater try: from cupy.cuda import nccl _available = True except Exception: _available = False import numpy class _Worker(multiprocessing.Process): def __init__(self, proc_id, pipe, master): super(_Worker, self).__init__() self.proc_id = proc_id self.pipe = pipe self.converter = master.converter self.model = master._master self.device = master._devices[proc_id] self.iterator = master._mpu_iterators[proc_id] self.n_devices = len(master._devices) def setup(self): _, comm_id = self.pipe.recv() self.comm = nccl.NcclCommunicator(self.n_devices, comm_id, self.proc_id) self.model.to_gpu(self.device) self.reporter = reporter.Reporter() self.reporter.add_observer('main', self.model) self.reporter.add_observers('main', self.model.namedlinks(skipself=True)) def run(self): dev = cuda.Device(self.device) dev.use() self.setup() while True: job, data = self.pipe.recv() if job == 'finalize': dev.synchronize() break if job == 'update': # For reducing memory self.model.cleargrads() batch = self.converter(self.iterator.next(), self.device) with self.reporter.scope({}): # pass dummy observation loss = _calc_loss(self.model, batch) self.model.cleargrads() loss.backward() del loss gg = gather_grads(self.model) nccl_data_type = _get_nccl_data_type(gg.dtype) null_stream = cuda.Stream.null self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) del gg self.model.cleargrads() gp = gather_params(self.model) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) scatter_params(self.model, gp) del gp class MultiprocessParallelUpdater(standard_updater.StandardUpdater): """Implementation of a multiprocess parallel GPU Updater. This is an implementation of :class:`Updater` that uses multiple GPUs with multi-process data parallelism. It uses Nvidia NCCL for communication between multiple GPUs. It behaves similarly to :class:`~chainer.training.updaters.StandardUpdater`. The update routine is modified to support data-parallel computation on multiple GPUs in one machine. It is based on synchronous parallel SGD: it parallelizes the gradient computation over a mini-batch, and updates the parameters only in the main device. It does not transfer the values collected by :class:`Reporter` in the sub devices to the main device. So you can only see the reported values in the main device. Args: iterators: List of dataset iterator for the training dataset. The number of the iterators must be same to the number of GPUs you use. optimizer: Optimizer to update parameters. The model should be attached to the optimizer. converter: Converter function to build input arrays. Each batch extracted by the iterator is split equally between the devices and then passed with corresponding ``device`` option to this function. :func:`~chainer.dataset.concat_examples` is used by default. devices: Dictionary or list of devices to which the training data is sent. The master device will be the first one in the list or the value attached to the key ``'main'``. auto_new_epoch (bool): If ``True``, :meth:`~chainer.Optimizer.new_epoch` of the main optimizer is automatically called when the ``is_new_epoch`` attribute of the main iterator is ``True``. 
""" def __init__(self, iterators, optimizer, converter=convert.concat_examples, devices=None, auto_new_epoch=True): if not MultiprocessParallelUpdater.available(): raise Exception( 'NCCL is not enabled. MultiprocessParallelUpdater ' 'requires NCCL.\n' 'Please reinstall CuPy after you install NCCL.\n' '(see https://docs-cupy.chainer.org/en/latest/install.html)') try: cuda.cupy.cuda.driver.ctxGetCurrent() _cuda_initialized = True except cuda.cupy.cuda.driver.CUDADriverError: # The context is not initialized, it will be fine. _cuda_initialized = False if _cuda_initialized: raise RuntimeError( 'The CUDA context has been already initialized. ' 'MultiprocessParallelUpdater assumes the context is ' 'uninitialized. Please do not call CUDA API before ' 'MultiprocessParallelUpdater creates processes.') assert len(iterators) == len(devices) for iterator in iterators[1:]: assert len(iterator.dataset) == len(iterators[0].dataset) # Correct optimizer parameters for new minibatch size optim = optimizer.__class__.__name__ if optim in ('Adam', 'AdaGrad', 'RMSprop'): optimizer.eps *= len(devices) warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif optim in ('RMSpropGraves', 'AdaDelta'): optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif hasattr(optimizer, 'lr'): optimizer.lr /= len(devices) warnings.warn('optimizer.lr is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.lr)) super(MultiprocessParallelUpdater, self).__init__( iterator=iterators[0], optimizer=optimizer, converter=converter, auto_new_epoch=auto_new_epoch, ) if isinstance(devices, dict): devices = devices.copy() main = devices.pop('main') devices = list(six.itervalues(devices)) devices = [main] + devices elif isinstance(devices, (list, tuple)): devices = list(devices) else: raise ValueError( 'devices argument should be either dict, list or tuple,' ' but {} was given.'.format(type(devices))) if devices is None or any(device is None for device in devices): raise ValueError('must specify GPU devices') self._master = optimizer.target self._devices = devices self._mpu_iterators = iterators self._initialized = False self._pipes = [] self._workers = [] self.comm = None @staticmethod def available(): return _available def _send_message(self, message): for pipe in self._pipes: pipe.send(message) def setup_workers(self): if self._initialized: return self._initialized = True self._master.cleargrads() for i in six.moves.range(1, len(self._devices)): pipe, worker_end = multiprocessing.Pipe() worker = _Worker(i, worker_end, self) worker.start() self._workers.append(worker) self._pipes.append(pipe) with cuda.Device(self._devices[0]): self._master.to_gpu(self._devices[0]) if len(self._devices) > 1: comm_id = nccl.get_unique_id() self._send_message(('set comm_id', comm_id)) self.comm = nccl.NcclCommunicator(len(self._devices), comm_id, 0) def update_core(self): self.setup_workers() self._send_message(('update', None)) with cuda.Device(self._devices[0]): # For reducing memory self._master.cleargrads() optimizer = self.get_optimizer('main') iterator = self.get_iterator('main') batch = iterator.next() batch = self.converter(batch, self._devices[0]) loss = _calc_loss(self._master, batch) self._master.cleargrads() loss.backward() # NCCL: reduce grads null_stream = cuda.Stream.null if self.comm is not None: gg = 
gather_grads(self._master) nccl_data_type = _get_nccl_data_type(gg.dtype) self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) scatter_grads(self._master, gg) del gg optimizer.update() if self.comm is not None: gp = gather_params(self._master) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) if self.auto_new_epoch and iterator.is_new_epoch: optimizer.new_epoch(auto=True) def finalize(self): self._send_message(('finalize', None)) for worker in self._workers: worker.join() def _calc_loss(model, in_arrays): if isinstance(in_arrays, tuple): return model(*in_arrays) elif isinstance(in_arrays, dict): return model(**in_arrays) else: return model(in_arrays) def size_num_grads(link): """Count total size of all gradient arrays of a given link Args: link (chainer.link.Link): Target link object. """ size = 0 num = 0 for param in link.params(): if param.size == 0: continue size += param.size num += 1 return size, num def _memcpy_gather(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info', 'raw float32 dst', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_dst = i; int i_src = i; if (id > 0) i_src -= info[id]; dst[i_dst] = 0; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *src = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = src[i_src]; } else { // fp16 float16 *src = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float>(src[i_src]); } } id_pre = id; ''', '_memcpy_gather', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _gather(link, target): size, num = size_num_grads(link) ptrs = numpy.empty(num, dtype=numpy.uint64) dtypes = numpy.empty(num, dtype=numpy.int8) info = numpy.empty(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is not None: ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_gather()(ptrs, dtypes, info, size=size) def gather_grads(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('gather_grads works only on GPU.') return _gather(link, 'grad') def gather_params(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. 
Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('Link.gather_params works only on GPU.') return _gather(link, 'data') def _memcpy_scatter(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info, raw float32 array', '', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_src = i; int i_dst = i; if (id > 0) i_dst -= info[id]; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *dst = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = array[i_src]; } else { // fp16 float16 *dst = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float16>(array[i_src]); } } id_pre = id; ''', '_memcpy_scatter', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _scatter(link, array, target): size, num = size_num_grads(link) ptrs = numpy.zeros(num, dtype=numpy.uint64) dtypes = numpy.zeros(num, dtype=numpy.int8) info = numpy.zeros(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is None: d = cuda.cupy.zeros(param.shape, dtype=param.dtype) setattr(param, target, d) ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 if i != num: raise() info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_scatter()(ptrs, dtypes, info, array, size=size) def scatter_grads(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_grads() """ return _scatter(link, array, 'grad') def scatter_params(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_params() """ return _scatter(link, array, 'data') def _get_nccl_data_type(dtype): """Get data type for NCCL""" if dtype == numpy.float32: nccl_data_type = nccl.NCCL_FLOAT elif dtype == numpy.float16: nccl_data_type = nccl.NCCL_HALF elif dtype == numpy.float64: nccl_data_type = nccl.NCCL_DOUBLE else: raise RuntimeError('Unexpected data type:{}'.format(dtype)) return nccl_data_type
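The row above centers on the master-side gradient reduction in update_core: gradients are flattened into one contiguous array with gather_grads, sum-reduced across devices over NCCL, and written back with scatter_grads. A minimal sketch of that sequence, reusing the helpers defined in the file above and assuming model is a chainer Link already on the master GPU and comm is an initialized nccl.NcclCommunicator (the function name reduce_grads_on_master is ours):

from chainer.backends import cuda
from cupy.cuda import nccl

def reduce_grads_on_master(model, comm):
    # Flatten every gradient of `model` into one contiguous array.
    gg = gather_grads(model)
    nccl_data_type = _get_nccl_data_type(gg.dtype)
    null_stream = cuda.Stream.null
    # In-place sum-reduction of all ranks' gradients onto rank 0.
    comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type,
                nccl.NCCL_SUM, 0, null_stream.ptr)
    # Copy the summed values back into each parameter's .grad array.
    scatter_grads(model, gg)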
gather_params
Put together all parameter data arrays and make a single array

Args:
    link (chainer.link.Link): Target link object.
Return:
    cupy.ndarray
import multiprocessing import warnings import six from chainer.backends import cuda from chainer.dataset import convert from chainer import reporter from chainer.training.updaters import standard_updater try: from cupy.cuda import nccl _available = True except Exception: _available = False import numpy class _Worker(multiprocessing.Process): def __init__(self, proc_id, pipe, master): super(_Worker, self).__init__() self.proc_id = proc_id self.pipe = pipe self.converter = master.converter self.model = master._master self.device = master._devices[proc_id] self.iterator = master._mpu_iterators[proc_id] self.n_devices = len(master._devices) def setup(self): _, comm_id = self.pipe.recv() self.comm = nccl.NcclCommunicator(self.n_devices, comm_id, self.proc_id) self.model.to_gpu(self.device) self.reporter = reporter.Reporter() self.reporter.add_observer('main', self.model) self.reporter.add_observers('main', self.model.namedlinks(skipself=True)) def run(self): dev = cuda.Device(self.device) dev.use() self.setup() while True: job, data = self.pipe.recv() if job == 'finalize': dev.synchronize() break if job == 'update': # For reducing memory self.model.cleargrads() batch = self.converter(self.iterator.next(), self.device) with self.reporter.scope({}): # pass dummy observation loss = _calc_loss(self.model, batch) self.model.cleargrads() loss.backward() del loss gg = gather_grads(self.model) nccl_data_type = _get_nccl_data_type(gg.dtype) null_stream = cuda.Stream.null self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) del gg self.model.cleargrads() gp = gather_params(self.model) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) scatter_params(self.model, gp) del gp class MultiprocessParallelUpdater(standard_updater.StandardUpdater): """Implementation of a multiprocess parallel GPU Updater. This is an implementation of :class:`Updater` that uses multiple GPUs with multi-process data parallelism. It uses Nvidia NCCL for communication between multiple GPUs. It behaves similarly to :class:`~chainer.training.updaters.StandardUpdater`. The update routine is modified to support data-parallel computation on multiple GPUs in one machine. It is based on synchronous parallel SGD: it parallelizes the gradient computation over a mini-batch, and updates the parameters only in the main device. It does not transfer the values collected by :class:`Reporter` in the sub devices to the main device. So you can only see the reported values in the main device. Args: iterators: List of dataset iterator for the training dataset. The number of the iterators must be same to the number of GPUs you use. optimizer: Optimizer to update parameters. The model should be attached to the optimizer. converter: Converter function to build input arrays. Each batch extracted by the iterator is split equally between the devices and then passed with corresponding ``device`` option to this function. :func:`~chainer.dataset.concat_examples` is used by default. devices: Dictionary or list of devices to which the training data is sent. The master device will be the first one in the list or the value attached to the key ``'main'``. auto_new_epoch (bool): If ``True``, :meth:`~chainer.Optimizer.new_epoch` of the main optimizer is automatically called when the ``is_new_epoch`` attribute of the main iterator is ``True``. 
""" def __init__(self, iterators, optimizer, converter=convert.concat_examples, devices=None, auto_new_epoch=True): if not MultiprocessParallelUpdater.available(): raise Exception( 'NCCL is not enabled. MultiprocessParallelUpdater ' 'requires NCCL.\n' 'Please reinstall CuPy after you install NCCL.\n' '(see https://docs-cupy.chainer.org/en/latest/install.html)') try: cuda.cupy.cuda.driver.ctxGetCurrent() _cuda_initialized = True except cuda.cupy.cuda.driver.CUDADriverError: # The context is not initialized, it will be fine. _cuda_initialized = False if _cuda_initialized: raise RuntimeError( 'The CUDA context has been already initialized. ' 'MultiprocessParallelUpdater assumes the context is ' 'uninitialized. Please do not call CUDA API before ' 'MultiprocessParallelUpdater creates processes.') assert len(iterators) == len(devices) for iterator in iterators[1:]: assert len(iterator.dataset) == len(iterators[0].dataset) # Correct optimizer parameters for new minibatch size optim = optimizer.__class__.__name__ if optim in ('Adam', 'AdaGrad', 'RMSprop'): optimizer.eps *= len(devices) warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif optim in ('RMSpropGraves', 'AdaDelta'): optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif hasattr(optimizer, 'lr'): optimizer.lr /= len(devices) warnings.warn('optimizer.lr is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.lr)) super(MultiprocessParallelUpdater, self).__init__( iterator=iterators[0], optimizer=optimizer, converter=converter, auto_new_epoch=auto_new_epoch, ) if isinstance(devices, dict): devices = devices.copy() main = devices.pop('main') devices = list(six.itervalues(devices)) devices = [main] + devices elif isinstance(devices, (list, tuple)): devices = list(devices) else: raise ValueError( 'devices argument should be either dict, list or tuple,' ' but {} was given.'.format(type(devices))) if devices is None or any(device is None for device in devices): raise ValueError('must specify GPU devices') self._master = optimizer.target self._devices = devices self._mpu_iterators = iterators self._initialized = False self._pipes = [] self._workers = [] self.comm = None @staticmethod def available(): return _available def _send_message(self, message): for pipe in self._pipes: pipe.send(message) def setup_workers(self): if self._initialized: return self._initialized = True self._master.cleargrads() for i in six.moves.range(1, len(self._devices)): pipe, worker_end = multiprocessing.Pipe() worker = _Worker(i, worker_end, self) worker.start() self._workers.append(worker) self._pipes.append(pipe) with cuda.Device(self._devices[0]): self._master.to_gpu(self._devices[0]) if len(self._devices) > 1: comm_id = nccl.get_unique_id() self._send_message(('set comm_id', comm_id)) self.comm = nccl.NcclCommunicator(len(self._devices), comm_id, 0) def update_core(self): self.setup_workers() self._send_message(('update', None)) with cuda.Device(self._devices[0]): # For reducing memory self._master.cleargrads() optimizer = self.get_optimizer('main') iterator = self.get_iterator('main') batch = iterator.next() batch = self.converter(batch, self._devices[0]) loss = _calc_loss(self._master, batch) self._master.cleargrads() loss.backward() # NCCL: reduce grads null_stream = cuda.Stream.null if self.comm is not None: gg = 
gather_grads(self._master) nccl_data_type = _get_nccl_data_type(gg.dtype) self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) scatter_grads(self._master, gg) del gg optimizer.update() if self.comm is not None: gp = gather_params(self._master) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) if self.auto_new_epoch and iterator.is_new_epoch: optimizer.new_epoch(auto=True) def finalize(self): self._send_message(('finalize', None)) for worker in self._workers: worker.join() def _calc_loss(model, in_arrays): if isinstance(in_arrays, tuple): return model(*in_arrays) elif isinstance(in_arrays, dict): return model(**in_arrays) else: return model(in_arrays) def size_num_grads(link): """Count total size of all gradient arrays of a given link Args: link (chainer.link.Link): Target link object. """ size = 0 num = 0 for param in link.params(): if param.size == 0: continue size += param.size num += 1 return size, num def _memcpy_gather(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info', 'raw float32 dst', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_dst = i; int i_src = i; if (id > 0) i_src -= info[id]; dst[i_dst] = 0; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *src = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = src[i_src]; } else { // fp16 float16 *src = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float>(src[i_src]); } } id_pre = id; ''', '_memcpy_gather', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _gather(link, target): size, num = size_num_grads(link) ptrs = numpy.empty(num, dtype=numpy.uint64) dtypes = numpy.empty(num, dtype=numpy.int8) info = numpy.empty(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is not None: ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_gather()(ptrs, dtypes, info, size=size) def gather_grads(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. 
Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('gather_grads works only on GPU.') return _gather(link, 'grad') # MASKED: gather_params function (lines 372-382) def _memcpy_scatter(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info, raw float32 array', '', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_src = i; int i_dst = i; if (id > 0) i_dst -= info[id]; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *dst = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = array[i_src]; } else { // fp16 float16 *dst = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float16>(array[i_src]); } } id_pre = id; ''', '_memcpy_scatter', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _scatter(link, array, target): size, num = size_num_grads(link) ptrs = numpy.zeros(num, dtype=numpy.uint64) dtypes = numpy.zeros(num, dtype=numpy.int8) info = numpy.zeros(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is None: d = cuda.cupy.zeros(param.shape, dtype=param.dtype) setattr(param, target, d) ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 if i != num: raise() info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_scatter()(ptrs, dtypes, info, array, size=size) def scatter_grads(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_grads() """ return _scatter(link, array, 'grad') def scatter_params(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_params() """ return _scatter(link, array, 'data') def _get_nccl_data_type(dtype): """Get data type for NCCL""" if dtype == numpy.float32: nccl_data_type = nccl.NCCL_FLOAT elif dtype == numpy.float16: nccl_data_type = nccl.NCCL_HALF elif dtype == numpy.float64: nccl_data_type = nccl.NCCL_DOUBLE else: raise RuntimeError('Unexpected data type:{}'.format(dtype)) return nccl_data_type
def gather_params(link):
    """Put together all parameter data arrays and make a single array

    Args:
        link (chainer.link.Link): Target link object.
    Return:
        cupy.ndarray
    """
    if link.xp is numpy:
        raise RuntimeError('Link.gather_params works only on GPU.')
    return _gather(link, 'data')
372
382
import multiprocessing import warnings import six from chainer.backends import cuda from chainer.dataset import convert from chainer import reporter from chainer.training.updaters import standard_updater try: from cupy.cuda import nccl _available = True except Exception: _available = False import numpy class _Worker(multiprocessing.Process): def __init__(self, proc_id, pipe, master): super(_Worker, self).__init__() self.proc_id = proc_id self.pipe = pipe self.converter = master.converter self.model = master._master self.device = master._devices[proc_id] self.iterator = master._mpu_iterators[proc_id] self.n_devices = len(master._devices) def setup(self): _, comm_id = self.pipe.recv() self.comm = nccl.NcclCommunicator(self.n_devices, comm_id, self.proc_id) self.model.to_gpu(self.device) self.reporter = reporter.Reporter() self.reporter.add_observer('main', self.model) self.reporter.add_observers('main', self.model.namedlinks(skipself=True)) def run(self): dev = cuda.Device(self.device) dev.use() self.setup() while True: job, data = self.pipe.recv() if job == 'finalize': dev.synchronize() break if job == 'update': # For reducing memory self.model.cleargrads() batch = self.converter(self.iterator.next(), self.device) with self.reporter.scope({}): # pass dummy observation loss = _calc_loss(self.model, batch) self.model.cleargrads() loss.backward() del loss gg = gather_grads(self.model) nccl_data_type = _get_nccl_data_type(gg.dtype) null_stream = cuda.Stream.null self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) del gg self.model.cleargrads() gp = gather_params(self.model) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) scatter_params(self.model, gp) del gp class MultiprocessParallelUpdater(standard_updater.StandardUpdater): """Implementation of a multiprocess parallel GPU Updater. This is an implementation of :class:`Updater` that uses multiple GPUs with multi-process data parallelism. It uses Nvidia NCCL for communication between multiple GPUs. It behaves similarly to :class:`~chainer.training.updaters.StandardUpdater`. The update routine is modified to support data-parallel computation on multiple GPUs in one machine. It is based on synchronous parallel SGD: it parallelizes the gradient computation over a mini-batch, and updates the parameters only in the main device. It does not transfer the values collected by :class:`Reporter` in the sub devices to the main device. So you can only see the reported values in the main device. Args: iterators: List of dataset iterator for the training dataset. The number of the iterators must be same to the number of GPUs you use. optimizer: Optimizer to update parameters. The model should be attached to the optimizer. converter: Converter function to build input arrays. Each batch extracted by the iterator is split equally between the devices and then passed with corresponding ``device`` option to this function. :func:`~chainer.dataset.concat_examples` is used by default. devices: Dictionary or list of devices to which the training data is sent. The master device will be the first one in the list or the value attached to the key ``'main'``. auto_new_epoch (bool): If ``True``, :meth:`~chainer.Optimizer.new_epoch` of the main optimizer is automatically called when the ``is_new_epoch`` attribute of the main iterator is ``True``. 
""" def __init__(self, iterators, optimizer, converter=convert.concat_examples, devices=None, auto_new_epoch=True): if not MultiprocessParallelUpdater.available(): raise Exception( 'NCCL is not enabled. MultiprocessParallelUpdater ' 'requires NCCL.\n' 'Please reinstall CuPy after you install NCCL.\n' '(see https://docs-cupy.chainer.org/en/latest/install.html)') try: cuda.cupy.cuda.driver.ctxGetCurrent() _cuda_initialized = True except cuda.cupy.cuda.driver.CUDADriverError: # The context is not initialized, it will be fine. _cuda_initialized = False if _cuda_initialized: raise RuntimeError( 'The CUDA context has been already initialized. ' 'MultiprocessParallelUpdater assumes the context is ' 'uninitialized. Please do not call CUDA API before ' 'MultiprocessParallelUpdater creates processes.') assert len(iterators) == len(devices) for iterator in iterators[1:]: assert len(iterator.dataset) == len(iterators[0].dataset) # Correct optimizer parameters for new minibatch size optim = optimizer.__class__.__name__ if optim in ('Adam', 'AdaGrad', 'RMSprop'): optimizer.eps *= len(devices) warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif optim in ('RMSpropGraves', 'AdaDelta'): optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta warnings.warn('optimizer.eps is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.eps)) elif hasattr(optimizer, 'lr'): optimizer.lr /= len(devices) warnings.warn('optimizer.lr is changed to {} ' 'by MultiprocessParallelUpdater for new batch size.'. format(optimizer.lr)) super(MultiprocessParallelUpdater, self).__init__( iterator=iterators[0], optimizer=optimizer, converter=converter, auto_new_epoch=auto_new_epoch, ) if isinstance(devices, dict): devices = devices.copy() main = devices.pop('main') devices = list(six.itervalues(devices)) devices = [main] + devices elif isinstance(devices, (list, tuple)): devices = list(devices) else: raise ValueError( 'devices argument should be either dict, list or tuple,' ' but {} was given.'.format(type(devices))) if devices is None or any(device is None for device in devices): raise ValueError('must specify GPU devices') self._master = optimizer.target self._devices = devices self._mpu_iterators = iterators self._initialized = False self._pipes = [] self._workers = [] self.comm = None @staticmethod def available(): return _available def _send_message(self, message): for pipe in self._pipes: pipe.send(message) def setup_workers(self): if self._initialized: return self._initialized = True self._master.cleargrads() for i in six.moves.range(1, len(self._devices)): pipe, worker_end = multiprocessing.Pipe() worker = _Worker(i, worker_end, self) worker.start() self._workers.append(worker) self._pipes.append(pipe) with cuda.Device(self._devices[0]): self._master.to_gpu(self._devices[0]) if len(self._devices) > 1: comm_id = nccl.get_unique_id() self._send_message(('set comm_id', comm_id)) self.comm = nccl.NcclCommunicator(len(self._devices), comm_id, 0) def update_core(self): self.setup_workers() self._send_message(('update', None)) with cuda.Device(self._devices[0]): # For reducing memory self._master.cleargrads() optimizer = self.get_optimizer('main') iterator = self.get_iterator('main') batch = iterator.next() batch = self.converter(batch, self._devices[0]) loss = _calc_loss(self._master, batch) self._master.cleargrads() loss.backward() # NCCL: reduce grads null_stream = cuda.Stream.null if self.comm is not None: gg = 
gather_grads(self._master) nccl_data_type = _get_nccl_data_type(gg.dtype) self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size, nccl_data_type, nccl.NCCL_SUM, 0, null_stream.ptr) scatter_grads(self._master, gg) del gg optimizer.update() if self.comm is not None: gp = gather_params(self._master) nccl_data_type = _get_nccl_data_type(gp.dtype) self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr) if self.auto_new_epoch and iterator.is_new_epoch: optimizer.new_epoch(auto=True) def finalize(self): self._send_message(('finalize', None)) for worker in self._workers: worker.join() def _calc_loss(model, in_arrays): if isinstance(in_arrays, tuple): return model(*in_arrays) elif isinstance(in_arrays, dict): return model(**in_arrays) else: return model(in_arrays) def size_num_grads(link): """Count total size of all gradient arrays of a given link Args: link (chainer.link.Link): Target link object. """ size = 0 num = 0 for param in link.params(): if param.size == 0: continue size += param.size num += 1 return size, num def _memcpy_gather(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info', 'raw float32 dst', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_dst = i; int i_src = i; if (id > 0) i_src -= info[id]; dst[i_dst] = 0; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *src = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = src[i_src]; } else { // fp16 float16 *src = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float>(src[i_src]); } } id_pre = id; ''', '_memcpy_gather', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _gather(link, target): size, num = size_num_grads(link) ptrs = numpy.empty(num, dtype=numpy.uint64) dtypes = numpy.empty(num, dtype=numpy.int8) info = numpy.empty(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is not None: ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_gather()(ptrs, dtypes, info, size=size) def gather_grads(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('gather_grads works only on GPU.') return _gather(link, 'grad') def gather_params(link): """Put together all gradient arrays and make a single array Args: link (chainer.link.Link): Target link object. 
Return: cupy.ndarray """ if link.xp is numpy: raise RuntimeError('Link.gather_params works only on GPU.') return _gather(link, 'data') def _memcpy_scatter(): return cuda.elementwise( 'raw T ptrs, raw X dtypes, raw Y info, raw float32 array', '', ''' int id_min = id_pre; int id_max = num_src; while (id_max - id_min > 1) { int id = (id_max + id_min) / 2; if (i < info[id]) id_max = id; else id_min = id; } int id = id_min; int i_src = i; int i_dst = i; if (id > 0) i_dst -= info[id]; if (ptrs[id] != NULL) { if (dtypes[id] == 0) { // fp32 float *dst = reinterpret_cast<float *>(ptrs[id]); dst[i_dst] = array[i_src]; } else { // fp16 float16 *dst = reinterpret_cast<float16 *>(ptrs[id]); dst[i_dst] = static_cast<float16>(array[i_src]); } } id_pre = id; ''', '_memcpy_scatter', loop_prep=''' int num_src = info[0]; int id_pre = 0; ''') def _scatter(link, array, target): size, num = size_num_grads(link) ptrs = numpy.zeros(num, dtype=numpy.uint64) dtypes = numpy.zeros(num, dtype=numpy.int8) info = numpy.zeros(num + 1, dtype=numpy.int32) info[0] = 0 i = 0 for _, param in sorted(link.namedparams()): if param.size == 0: continue ptrs[i] = 0 # NULL pointer d = getattr(param, target) if d is None: d = cuda.cupy.zeros(param.shape, dtype=param.dtype) setattr(param, target, d) ptrs[i] = d.data.ptr dtypes[i] = 0 # fp32 if param.dtype == numpy.float16: dtypes[i] = 1 # fp16 info[i + 1] = info[i] + param.size i += 1 if i != num: raise() info[0] = num ptrs = cuda.to_gpu(ptrs) dtypes = cuda.to_gpu(dtypes) info = cuda.to_gpu(info) return _memcpy_scatter()(ptrs, dtypes, info, array, size=size) def scatter_grads(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_grads() """ return _scatter(link, array, 'grad') def scatter_params(link, array): """Put back contents of the specified array to the related gradient arrays Args: link (chainer.link.Link): Target link object. array (cupy.ndarray): gathered array created by gather_params() """ return _scatter(link, array, 'data') def _get_nccl_data_type(dtype): """Get data type for NCCL""" if dtype == numpy.float32: nccl_data_type = nccl.NCCL_FLOAT elif dtype == numpy.float16: nccl_data_type = nccl.NCCL_HALF elif dtype == numpy.float64: nccl_data_type = nccl.NCCL_DOUBLE else: raise RuntimeError('Unexpected data type:{}'.format(dtype)) return nccl_data_type
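The counterpart to the gradient reduction is the parameter broadcast after optimizer.update(): the master gathers its updated parameters and bcasts them, and each worker receives the values into its own gathered buffer and scatters them back into its model, as _Worker.run does above. A sketch of the worker side under the same assumptions as before (receive_params_on_worker is our name):

from chainer.backends import cuda

def receive_params_on_worker(model, comm):
    # Build a flattened copy of the current parameters; this also fixes
    # the buffer layout that the broadcast will fill.
    gp = gather_params(model)
    nccl_data_type = _get_nccl_data_type(gp.dtype)
    null_stream = cuda.Stream.null
    # Rank 0 sends its flattened parameters; other ranks overwrite gp.
    comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0, null_stream.ptr)
    # Write the received values back into each parameter's .data array.
    scatter_params(model, gp)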
get_heaviest_peak
Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified us of.
import asyncio import logging from typing import Dict, List, Optional, Set, Tuple from seno.types.blockchain_format.sized_bytes import bytes32 from seno.util.ints import uint32, uint128 log = logging.getLogger(__name__) class SyncStore: # Whether or not we are syncing sync_mode: bool long_sync: bool peak_to_peer: Dict[bytes32, Set[bytes32]] # Header hash : peer node id peer_to_peak: Dict[bytes32, Tuple[bytes32, uint32, uint128]] # peer node id : [header_hash, height, weight] sync_target_header_hash: Optional[bytes32] # Peak hash we are syncing towards sync_target_height: Optional[uint32] # Peak height we are syncing towards peers_changed: asyncio.Event batch_syncing: Set[bytes32] # Set of nodes which we are batch syncing from backtrack_syncing: Dict[bytes32, int] # Set of nodes which we are backtrack syncing from, and how many threads @classmethod async def create(cls): self = cls() self.sync_mode = False self.long_sync = False self.sync_target_header_hash = None self.sync_target_height = None self.peak_fork_point = {} self.peak_to_peer = {} self.peer_to_peak = {} self.peers_changed = asyncio.Event() self.batch_syncing = set() self.backtrack_syncing = {} return self def set_peak_target(self, peak_hash: bytes32, target_height: uint32): self.sync_target_header_hash = peak_hash self.sync_target_height = target_height def get_sync_target_hash(self) -> Optional[bytes32]: return self.sync_target_header_hash def get_sync_target_height(self) -> Optional[bytes32]: return self.sync_target_height def set_sync_mode(self, sync_mode: bool): self.sync_mode = sync_mode def get_sync_mode(self) -> bool: return self.sync_mode def set_long_sync(self, long_sync: bool): self.long_sync = long_sync def get_long_sync(self) -> bool: return self.long_sync def peer_has_block(self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool): """ Adds a record that a certain peer has a block. """ if header_hash == self.sync_target_header_hash: self.peers_changed.set() if header_hash in self.peak_to_peer: self.peak_to_peer[header_hash].add(peer_id) else: self.peak_to_peer[header_hash] = {peer_id} if new_peak: self.peer_to_peak[peer_id] = (header_hash, height, weight) def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]: """ Returns: peer ids of peers that have at least one of the header hashes. """ node_ids: Set[bytes32] = set() for header_hash in header_hashes: if header_hash in self.peak_to_peer: for node_id in self.peak_to_peer[header_hash]: node_ids.add(node_id) return node_ids def get_peak_of_each_peer(self) -> Dict[bytes32, Tuple[bytes32, uint32, uint128]]: """ Returns: dictionary of peer id to peak information. """ ret = {} for peer_id, v in self.peer_to_peak.items(): if v[0] not in self.peak_to_peer: continue ret[peer_id] = v return ret # MASKED: get_heaviest_peak function (lines 101-120) async def clear_sync_info(self): """ Clears the peak_to_peer info which can get quite large. """ self.peak_to_peer = {} def peer_disconnected(self, node_id: bytes32): if node_id in self.peer_to_peak: del self.peer_to_peak[node_id] for peak, peers in self.peak_to_peer.items(): if node_id in peers: self.peak_to_peer[peak].remove(node_id) assert node_id not in self.peak_to_peer[peak] self.peers_changed.set()
def get_heaviest_peak(self) -> Optional[Tuple[bytes32, uint32, uint128]]:
    """
    Returns: the header_hash, height, and weight of the heaviest block that
    one of our peers has notified us of.
    """
    if len(self.peer_to_peak) == 0:
        return None
    heaviest_peak_hash: Optional[bytes32] = None
    heaviest_peak_weight: uint128 = uint128(0)
    heaviest_peak_height: Optional[uint32] = None
    for peer_id, (peak_hash, height, weight) in self.peer_to_peak.items():
        if peak_hash not in self.peak_to_peer:
            continue
        if heaviest_peak_hash is None or weight > heaviest_peak_weight:
            heaviest_peak_hash = peak_hash
            heaviest_peak_weight = weight
            heaviest_peak_height = height
    assert heaviest_peak_hash is not None and heaviest_peak_weight is not None and heaviest_peak_height is not None
    return heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight
101
120
import asyncio import logging from typing import Dict, List, Optional, Set, Tuple from seno.types.blockchain_format.sized_bytes import bytes32 from seno.util.ints import uint32, uint128 log = logging.getLogger(__name__) class SyncStore: # Whether or not we are syncing sync_mode: bool long_sync: bool peak_to_peer: Dict[bytes32, Set[bytes32]] # Header hash : peer node id peer_to_peak: Dict[bytes32, Tuple[bytes32, uint32, uint128]] # peer node id : [header_hash, height, weight] sync_target_header_hash: Optional[bytes32] # Peak hash we are syncing towards sync_target_height: Optional[uint32] # Peak height we are syncing towards peers_changed: asyncio.Event batch_syncing: Set[bytes32] # Set of nodes which we are batch syncing from backtrack_syncing: Dict[bytes32, int] # Set of nodes which we are backtrack syncing from, and how many threads @classmethod async def create(cls): self = cls() self.sync_mode = False self.long_sync = False self.sync_target_header_hash = None self.sync_target_height = None self.peak_fork_point = {} self.peak_to_peer = {} self.peer_to_peak = {} self.peers_changed = asyncio.Event() self.batch_syncing = set() self.backtrack_syncing = {} return self def set_peak_target(self, peak_hash: bytes32, target_height: uint32): self.sync_target_header_hash = peak_hash self.sync_target_height = target_height def get_sync_target_hash(self) -> Optional[bytes32]: return self.sync_target_header_hash def get_sync_target_height(self) -> Optional[bytes32]: return self.sync_target_height def set_sync_mode(self, sync_mode: bool): self.sync_mode = sync_mode def get_sync_mode(self) -> bool: return self.sync_mode def set_long_sync(self, long_sync: bool): self.long_sync = long_sync def get_long_sync(self) -> bool: return self.long_sync def peer_has_block(self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool): """ Adds a record that a certain peer has a block. """ if header_hash == self.sync_target_header_hash: self.peers_changed.set() if header_hash in self.peak_to_peer: self.peak_to_peer[header_hash].add(peer_id) else: self.peak_to_peer[header_hash] = {peer_id} if new_peak: self.peer_to_peak[peer_id] = (header_hash, height, weight) def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]: """ Returns: peer ids of peers that have at least one of the header hashes. """ node_ids: Set[bytes32] = set() for header_hash in header_hashes: if header_hash in self.peak_to_peer: for node_id in self.peak_to_peer[header_hash]: node_ids.add(node_id) return node_ids def get_peak_of_each_peer(self) -> Dict[bytes32, Tuple[bytes32, uint32, uint128]]: """ Returns: dictionary of peer id to peak information. """ ret = {} for peer_id, v in self.peer_to_peak.items(): if v[0] not in self.peak_to_peer: continue ret[peer_id] = v return ret def get_heaviest_peak(self) -> Optional[Tuple[bytes32, uint32, uint128]]: """ Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified us of. 
""" if len(self.peer_to_peak) == 0: return None heaviest_peak_hash: Optional[bytes32] = None heaviest_peak_weight: uint128 = uint128(0) heaviest_peak_height: Optional[uint32] = None for peer_id, (peak_hash, height, weight) in self.peer_to_peak.items(): if peak_hash not in self.peak_to_peer: continue if heaviest_peak_hash is None or weight > heaviest_peak_weight: heaviest_peak_hash = peak_hash heaviest_peak_weight = weight heaviest_peak_height = height assert heaviest_peak_hash is not None and heaviest_peak_weight is not None and heaviest_peak_height is not None return heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight async def clear_sync_info(self): """ Clears the peak_to_peer info which can get quite large. """ self.peak_to_peer = {} def peer_disconnected(self, node_id: bytes32): if node_id in self.peer_to_peak: del self.peer_to_peak[node_id] for peak, peers in self.peak_to_peer.items(): if node_id in peers: self.peak_to_peer[peak].remove(node_id) assert node_id not in self.peak_to_peer[peak] self.peers_changed.set()
calculate_chunk_slices
Calculate slices for indexing an adapter. Parameters ---------- items_per_chunk: int Maximum number of items per chunk; only the final chunk may hold fewer. num_items: int Total number of items. Returns ------- list of slices
#!/usr/bin/env python import os import re import pickle import json import glob import numpy as np from abc import ABC, abstractmethod from concurrent.futures import ProcessPoolExecutor from contextlib import contextmanager from collections import namedtuple, OrderedDict from tqdm import tqdm from .utils import img_to_jpeg_bytes, jpeg_bytes_to_img, _DEFAULT_JPEG_QUALITY from pathlib import Path #from simplejpeg import is_jpeg def is_jpeg(data): """ Check whether a bytes object (or similar) contains JPEG (JFIF) data. Returns False for truncated files. Taken from simplejpeg.is_jpeg, but less strict because it doesn't check EOI, as most JPEG viewers don't really throw error for missing EOI. :param data: JPEG (JFIF) data :return: True if JPEG """ return data[:2] == b'\xFF\xD8' ImgInfo = namedtuple('ImgInfo', ['loc', 'pad', 'length']) class FileFormatException(Exception): pass class AbstractSerializer(ABC): # pragma: no cover @abstractmethod def load(self, file_name): pass @abstractmethod def dump(self, thing, file_name): pass class PickleSerializer(AbstractSerializer): def load(self, file_name): with open(file_name, 'rb') as file_pointer: return pickle.load(file_pointer) def dump(self, thing, file_name): with open(file_name, 'wb') as file_pointer: pickle.dump(thing, file_pointer) class JSONSerializer(AbstractSerializer): def load(self, file_name): with open(file_name, 'r') as file_pointer: return json.load(file_pointer, object_pairs_hook=OrderedDict) def dump(self, thing, file_name): with open(file_name, 'w') as file_pointer: json.dump(thing, file_pointer) pickle_serializer = PickleSerializer() json_serializer = JSONSerializer() def extract_input_for_getitem(element): if isinstance(element, tuple) and len(element) == 2: id_, slice_ = element elif isinstance(element, (int, str)): id_, slice_ = element, None else: raise TypeError("Undefined input type! id or (id, slice) expected") id_ = str(id_) return id_, slice_ class GulpDirectory(object): """ Represents a directory containing *.gulp and *.gmeta files. Parameters ---------- output_dir: str Path to the directory containing the files. jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns the desired decoded image format (e.g. np.ndarray) Attributes ---------- all_meta_dicts: list of dicts All meta dicts from all chunks as a list. chunk_lookup: dict: int -> str Mapping element id to chunk index. chunk_objs_lookup: dict: int -> GulpChunk Mapping element id to chunk index. merged_meta_dict: dict: id -> meta dict all meta dicts merged """ def __init__(self, output_dir, jpeg_decoder=jpeg_bytes_to_img): self.output_dir = output_dir self.jpeg_decoder = jpeg_decoder self.chunk_objs_lookup = OrderedDict(zip(self._chunk_ids(), self._chunks())) self.all_meta_dicts = [c.meta_dict for c in self.chunk_objs_lookup.values()] self.num_chunks = len(self.chunk_objs_lookup) self.chunk_lookup = {} for chunk_id, chunk in self.chunk_objs_lookup.items(): for id_ in chunk.meta_dict: self.chunk_lookup[id_] = chunk_id self.merged_meta_dict = {} for d in self.all_meta_dicts: for k in d.keys(): assert k not in self.merged_meta_dict,\ "Duplicate id detected {}".format(k) else: self.merged_meta_dict.update(d) def __iter__(self): return iter(self.chunk_objs_lookup.values()) def chunks(self): """ Return a generator over existing GulpChunk objects which are ready to be opened and read from. 
""" return self.__iter__() def _chunks(self): return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._existing_file_paths()) def new_chunks(self, total_new_chunks): """ Return a generator over freshly setup GulpChunk objects which are ready to be opened and written to. Parameters ---------- total_new_chunks: int The total number of new chunks to initialize. """ return ((GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._allocate_new_file_paths(total_new_chunks))) def __getitem__(self, element): id_, _ = extract_input_for_getitem(element) chunk_id = self.chunk_lookup[id_] gulp_chunk = self.chunk_objs_lookup[chunk_id] with gulp_chunk.open(): return gulp_chunk[element] def _find_existing_data_paths(self): return sorted(glob.glob(os.path.join(self.output_dir, 'data*.gulp'))) def _find_existing_meta_paths(self): return sorted(glob.glob(os.path.join(self.output_dir, 'meta*.gmeta'))) def _load_label_dict(self): return json.load(open(os.path.join(self.output_dir, 'label2idx.json'), 'rb')) def _existing_file_paths(self): data_paths = self._find_existing_data_paths() meta_paths = self._find_existing_meta_paths() assert len(data_paths) == len(meta_paths) return zip(data_paths, meta_paths) def _find_ids_from_paths(self, paths): return [int(re.findall(r'\d+', os.path.basename(p))[0]) for p in paths] def _chunk_ids(self): data_paths = self._find_existing_data_paths() meta_paths = self._find_existing_meta_paths() data_ids = self._find_ids_from_paths(data_paths) meta_ids = self._find_ids_from_paths(meta_paths) assert data_ids == meta_ids return data_ids def _next_chunk_id(self): existing_chunk_ids = self._chunk_ids() next_chunk_id = 0 if len(existing_chunk_ids) > 0: next_chunk_id = max([int(i) for i in existing_chunk_ids]) + 1 return next_chunk_id def _allocate_new_file_paths(self, total_new_chunks): next_chunk_id = self._next_chunk_id() return [self._initialize_filenames(i) for i in range(next_chunk_id, next_chunk_id + total_new_chunks)] def _initialize_filenames(self, chunk_id): data_file_path = os.path.join( self.output_dir, 'data_{}.gulp'.format(chunk_id)) meta_file_path = os.path.join( self.output_dir, 'meta_{}.gmeta'.format(chunk_id)) return data_file_path, meta_file_path class GulpChunk(object): """ Represents a gulp chunk on disk. Parameters ---------- data_file_path: str Path to the *.gulp file. meta_file_path: str Path to the *.gmeta file. serializer: subclass of AbstractSerializer The type of serializer to use. jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns the desired decoded image format (e.g. 
np.ndarray) """ def __init__(self, data_file_path, meta_file_path, serializer=json_serializer, jpeg_decoder=jpeg_bytes_to_img): self.jpeg_decoder = jpeg_decoder self.serializer = serializer self.data_file_path = data_file_path self.meta_file_path = meta_file_path self.meta_dict = self._get_or_create_dict() self._img_info = {} self.fp = None def __contains__(self, id_): return str(id_) in self.meta_dict def __getitem__(self, element): id_, slice_ = extract_input_for_getitem(element) return self.read_frames(id_, slice_) def __iter__(self): return self.iter_all() def _get_frame_infos(self, id_): id_ = str(id_) if id_ in self.meta_dict: return (self._get_or_create_img_info(id_), self._copy_meta_data(id_)) def _copy_meta_data(self, id_): return dict(self.meta_dict[id_]['meta_data'][0]) def _get_or_create_img_info(self, id_): if id_ not in self._img_info: self._img_info[id_] = [ImgInfo(*info) for info in self.meta_dict[id_]['frame_info']] return self._img_info[id_] def _get_or_create_dict(self): if os.path.exists(self.meta_file_path): return self.serializer.load(self.meta_file_path) else: return OrderedDict() @staticmethod def _default_factory(): return OrderedDict([('frame_info', []), ('meta_data', [])]) @staticmethod def _pad_image(number): return (4 - (number % 4)) % 4 def _append_meta(self, id_, meta_data): id_ = str(id_) if id_ not in self.meta_dict: # implements an OrderedDefaultDict self.meta_dict[id_] = self._default_factory() self.meta_dict[id_]['meta_data'].append(meta_data) def _write_frame(self, id_, image, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): loc = self.fp.tell() if isinstance(image, (str, Path)): # If image is a string or pathlib Path, assume that it is a path to a jpeg file # and add it directly without decoding and encoding it. with open(str(image), 'rb') as image_file: img_str = image_file.read() if not is_jpeg(img_str): raise FileFormatException(f'Image file from path {image} does not appear to be a JPEG file.') else: # np.array img_str = img_to_jpeg_bytes(image, jpeg_encode_quality) assert len(img_str) > 0 pad = self._pad_image(len(img_str)) record = img_str.ljust(len(img_str) + pad, b'\0') assert len(record) > 0 img_info = ImgInfo(loc=loc, length=len(record), pad=pad) id_ = str(id_) if id_ not in self.meta_dict: # implements an OrderedDefaultDict self.meta_dict[id_] = self._default_factory() self.meta_dict[id_]['frame_info'].append(img_info) self.fp.write(record) def _write_frames(self, id_, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): for frame in frames: self._write_frame(id_, frame, jpeg_encode_quality) @contextmanager def open(self, flag='rb'): """Open the gulp chunk for reading. Parameters ---------- flag: str 'rb': Read binary 'wb': Write binary 'ab': Append to binary Notes ----- Works as a context manager but returns None. """ if flag in ['wb', 'rb', 'ab']: self.fp = open(self.data_file_path, flag) else: m = "This file does not support the mode: '{}'".format(flag) raise NotImplementedError(m) yield if flag in ['wb', 'ab']: self.flush() self.fp.close() def flush(self): """Flush all buffers and write the meta file.""" self.fp.flush() self.serializer.dump(self.meta_dict, self.meta_file_path) def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): """ Append an item to the gulp. Parameters ---------- id_ : str The ID of the item meta_data: dict The meta-data associated with the item. frames: list of numpy arrays The frames of the item as a list of numpy dictionaries consisting of image pixel values. 
""" self._append_meta(id_, meta_data) self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality) def read_frames(self, id_, slice_=None): """ Read frames for a single item. Parameters ---------- id_: str The ID of the item slice_: slice or list of ints: A slice or list of indices with which to select frames. Returns ------- frames (int), meta(dict) The frames of the item as a list of numpy arrays consisting of image pixel values. And the metadata. """ frame_infos, meta_data = self._get_frame_infos(id_) slice_element = slice_ if slice_ is not None else slice(0, len(frame_infos)) def extract_frame(frame_info): self.fp.seek(frame_info.loc) record = self.fp.read(frame_info.length) img_str = record[:len(record)-frame_info.pad] img = self.jpeg_decoder(img_str) return img if isinstance(slice_element, (list, np.ndarray)): selected_frame_infos = [frame_infos[idx] for idx in slice_element] else: selected_frame_infos = frame_infos[slice_element] frames = [extract_frame(frame_info) for frame_info in selected_frame_infos] return frames, meta_data def iter_all(self, accepted_ids=None, shuffle=False): """ Iterate over all frames in the gulp. Parameters ---------- accepted_ids: list of str A filter for accepted ids. shuffle: bool Shuffle the items or not. Returns ------- iterator An iterator that yield a series of frames,meta tuples. See `read_frames` for details. """ ids = self.meta_dict.keys() if accepted_ids is not None: intersection = list(set(ids) & set(accepted_ids)) ids = [id_ for id_ in ids if id_ in intersection] if shuffle: ids = list(ids) np.random.shuffle(ids) with self.open('rb'): for id_ in ids: frames, meta = self.read_frames(id_) yield frames, meta class ChunkWriter(object): """Can write from an adapter to a gulp chunk. Parameters ---------- adapter: subclass of AbstractDatasetAdapter The adapter to get items from. """ def __init__(self, adapter): self.adapter = adapter def write_chunk(self, output_chunk, input_slice): """Write from an input slice in the adapter to an output chunk. Parameters ---------- output_chunk: GulpChunk The chunk to write to input_slice: slice The slice to use from the adapter. """ with output_chunk.open('wb'): for video in self.adapter.iter_data(input_slice): id_ = video['id'] meta_data = video['meta'] frames = video['frames'] if len(frames) > 0: output_chunk.append(id_, meta_data, frames, self.adapter.jpeg_encode_quality()) else: print("Failed to write video with id: {}; no frames" .format(id_)) # MASKED: calculate_chunk_slices function (lines 462-480) class GulpIngestor(object): """Ingest items from an adapter into an gulp chunks. Parameters ---------- adapter: subclass of AbstractDatasetAdapter The adapter to ingest from. output_folder: str The folder/directory to write to. videos_per_chunk: int The total number of items per chunk. num_workers: int The level of parallelism. 
""" def __init__(self, adapter, output_folder, videos_per_chunk, num_workers): assert int(num_workers) > 0 self.adapter = adapter self.output_folder = output_folder self.videos_per_chunk = int(videos_per_chunk) self.num_workers = int(num_workers) def __call__(self): os.makedirs(self.output_folder, exist_ok=True) chunk_slices = calculate_chunk_slices(self.videos_per_chunk, len(self.adapter)) gulp_directory = GulpDirectory(self.output_folder) new_chunks = gulp_directory.new_chunks(len(chunk_slices)) chunk_writer = ChunkWriter(self.adapter) with ProcessPoolExecutor(max_workers=self.num_workers) as executor: result = executor.map(chunk_writer.write_chunk, new_chunks, chunk_slices) for r in tqdm(result, desc='Chunks finished', unit='chunk', dynamic_ncols=True, total=len(chunk_slices)): pass
def calculate_chunk_slices(items_per_chunk, num_items): """Calculate slices for indexing an adapter. Parameters ---------- items_per_chunk: int Maximum number of items per chunk; only the final chunk may hold fewer. num_items: int Total number of items. Returns ------- list of slices """ assert items_per_chunk > 0 assert num_items > 0 return [slice(i, min(i + items_per_chunk, num_items)) for i in range(0, num_items, items_per_chunk)]
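For example, splitting 10 items into chunks of at most 4 items produces three slices, the last one shorter:

>>> calculate_chunk_slices(4, 10)
[slice(0, 4, None), slice(4, 8, None), slice(8, 10, None)]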
462
480
#!/usr/bin/env python import os import re import pickle import json import glob import numpy as np from abc import ABC, abstractmethod from concurrent.futures import ProcessPoolExecutor from contextlib import contextmanager from collections import namedtuple, OrderedDict from tqdm import tqdm from .utils import img_to_jpeg_bytes, jpeg_bytes_to_img, _DEFAULT_JPEG_QUALITY from pathlib import Path #from simplejpeg import is_jpeg def is_jpeg(data): """ Check whether a bytes object (or similar) contains JPEG (JFIF) data. Returns False for truncated files. Taken from simplejpeg.is_jpeg, but less strict because it doesn't check EOI, as most JPEG viewers don't really throw error for missing EOI. :param data: JPEG (JFIF) data :return: True if JPEG """ return data[:2] == b'\xFF\xD8' ImgInfo = namedtuple('ImgInfo', ['loc', 'pad', 'length']) class FileFormatException(Exception): pass class AbstractSerializer(ABC): # pragma: no cover @abstractmethod def load(self, file_name): pass @abstractmethod def dump(self, thing, file_name): pass class PickleSerializer(AbstractSerializer): def load(self, file_name): with open(file_name, 'rb') as file_pointer: return pickle.load(file_pointer) def dump(self, thing, file_name): with open(file_name, 'wb') as file_pointer: pickle.dump(thing, file_pointer) class JSONSerializer(AbstractSerializer): def load(self, file_name): with open(file_name, 'r') as file_pointer: return json.load(file_pointer, object_pairs_hook=OrderedDict) def dump(self, thing, file_name): with open(file_name, 'w') as file_pointer: json.dump(thing, file_pointer) pickle_serializer = PickleSerializer() json_serializer = JSONSerializer() def extract_input_for_getitem(element): if isinstance(element, tuple) and len(element) == 2: id_, slice_ = element elif isinstance(element, (int, str)): id_, slice_ = element, None else: raise TypeError("Undefined input type! id or (id, slice) expected") id_ = str(id_) return id_, slice_ class GulpDirectory(object): """ Represents a directory containing *.gulp and *.gmeta files. Parameters ---------- output_dir: str Path to the directory containing the files. jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns the desired decoded image format (e.g. np.ndarray) Attributes ---------- all_meta_dicts: list of dicts All meta dicts from all chunks as a list. chunk_lookup: dict: int -> str Mapping element id to chunk index. chunk_objs_lookup: dict: int -> GulpChunk Mapping element id to chunk index. merged_meta_dict: dict: id -> meta dict all meta dicts merged """ def __init__(self, output_dir, jpeg_decoder=jpeg_bytes_to_img): self.output_dir = output_dir self.jpeg_decoder = jpeg_decoder self.chunk_objs_lookup = OrderedDict(zip(self._chunk_ids(), self._chunks())) self.all_meta_dicts = [c.meta_dict for c in self.chunk_objs_lookup.values()] self.num_chunks = len(self.chunk_objs_lookup) self.chunk_lookup = {} for chunk_id, chunk in self.chunk_objs_lookup.items(): for id_ in chunk.meta_dict: self.chunk_lookup[id_] = chunk_id self.merged_meta_dict = {} for d in self.all_meta_dicts: for k in d.keys(): assert k not in self.merged_meta_dict,\ "Duplicate id detected {}".format(k) else: self.merged_meta_dict.update(d) def __iter__(self): return iter(self.chunk_objs_lookup.values()) def chunks(self): """ Return a generator over existing GulpChunk objects which are ready to be opened and read from. 
""" return self.__iter__() def _chunks(self): return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._existing_file_paths()) def new_chunks(self, total_new_chunks): """ Return a generator over freshly setup GulpChunk objects which are ready to be opened and written to. Parameters ---------- total_new_chunks: int The total number of new chunks to initialize. """ return ((GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._allocate_new_file_paths(total_new_chunks))) def __getitem__(self, element): id_, _ = extract_input_for_getitem(element) chunk_id = self.chunk_lookup[id_] gulp_chunk = self.chunk_objs_lookup[chunk_id] with gulp_chunk.open(): return gulp_chunk[element] def _find_existing_data_paths(self): return sorted(glob.glob(os.path.join(self.output_dir, 'data*.gulp'))) def _find_existing_meta_paths(self): return sorted(glob.glob(os.path.join(self.output_dir, 'meta*.gmeta'))) def _load_label_dict(self): return json.load(open(os.path.join(self.output_dir, 'label2idx.json'), 'rb')) def _existing_file_paths(self): data_paths = self._find_existing_data_paths() meta_paths = self._find_existing_meta_paths() assert len(data_paths) == len(meta_paths) return zip(data_paths, meta_paths) def _find_ids_from_paths(self, paths): return [int(re.findall(r'\d+', os.path.basename(p))[0]) for p in paths] def _chunk_ids(self): data_paths = self._find_existing_data_paths() meta_paths = self._find_existing_meta_paths() data_ids = self._find_ids_from_paths(data_paths) meta_ids = self._find_ids_from_paths(meta_paths) assert data_ids == meta_ids return data_ids def _next_chunk_id(self): existing_chunk_ids = self._chunk_ids() next_chunk_id = 0 if len(existing_chunk_ids) > 0: next_chunk_id = max([int(i) for i in existing_chunk_ids]) + 1 return next_chunk_id def _allocate_new_file_paths(self, total_new_chunks): next_chunk_id = self._next_chunk_id() return [self._initialize_filenames(i) for i in range(next_chunk_id, next_chunk_id + total_new_chunks)] def _initialize_filenames(self, chunk_id): data_file_path = os.path.join( self.output_dir, 'data_{}.gulp'.format(chunk_id)) meta_file_path = os.path.join( self.output_dir, 'meta_{}.gmeta'.format(chunk_id)) return data_file_path, meta_file_path class GulpChunk(object): """ Represents a gulp chunk on disk. Parameters ---------- data_file_path: str Path to the *.gulp file. meta_file_path: str Path to the *.gmeta file. serializer: subclass of AbstractSerializer The type of serializer to use. jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns the desired decoded image format (e.g. 
np.ndarray) """ def __init__(self, data_file_path, meta_file_path, serializer=json_serializer, jpeg_decoder=jpeg_bytes_to_img): self.jpeg_decoder = jpeg_decoder self.serializer = serializer self.data_file_path = data_file_path self.meta_file_path = meta_file_path self.meta_dict = self._get_or_create_dict() self._img_info = {} self.fp = None def __contains__(self, id_): return str(id_) in self.meta_dict def __getitem__(self, element): id_, slice_ = extract_input_for_getitem(element) return self.read_frames(id_, slice_) def __iter__(self): return self.iter_all() def _get_frame_infos(self, id_): id_ = str(id_) if id_ in self.meta_dict: return (self._get_or_create_img_info(id_), self._copy_meta_data(id_)) def _copy_meta_data(self, id_): return dict(self.meta_dict[id_]['meta_data'][0]) def _get_or_create_img_info(self, id_): if id_ not in self._img_info: self._img_info[id_] = [ImgInfo(*info) for info in self.meta_dict[id_]['frame_info']] return self._img_info[id_] def _get_or_create_dict(self): if os.path.exists(self.meta_file_path): return self.serializer.load(self.meta_file_path) else: return OrderedDict() @staticmethod def _default_factory(): return OrderedDict([('frame_info', []), ('meta_data', [])]) @staticmethod def _pad_image(number): return (4 - (number % 4)) % 4 def _append_meta(self, id_, meta_data): id_ = str(id_) if id_ not in self.meta_dict: # implements an OrderedDefaultDict self.meta_dict[id_] = self._default_factory() self.meta_dict[id_]['meta_data'].append(meta_data) def _write_frame(self, id_, image, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): loc = self.fp.tell() if isinstance(image, (str, Path)): # If image is a string or pathlib Path, assume that it is a path to a jpeg file # and add it directly without decoding and encoding it. with open(str(image), 'rb') as image_file: img_str = image_file.read() if not is_jpeg(img_str): raise FileFormatException(f'Image file from path {image} does not appear to be a JPEG file.') else: # np.array img_str = img_to_jpeg_bytes(image, jpeg_encode_quality) assert len(img_str) > 0 pad = self._pad_image(len(img_str)) record = img_str.ljust(len(img_str) + pad, b'\0') assert len(record) > 0 img_info = ImgInfo(loc=loc, length=len(record), pad=pad) id_ = str(id_) if id_ not in self.meta_dict: # implements an OrderedDefaultDict self.meta_dict[id_] = self._default_factory() self.meta_dict[id_]['frame_info'].append(img_info) self.fp.write(record) def _write_frames(self, id_, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): for frame in frames: self._write_frame(id_, frame, jpeg_encode_quality) @contextmanager def open(self, flag='rb'): """Open the gulp chunk for reading. Parameters ---------- flag: str 'rb': Read binary 'wb': Write binary 'ab': Append to binary Notes ----- Works as a context manager but returns None. """ if flag in ['wb', 'rb', 'ab']: self.fp = open(self.data_file_path, flag) else: m = "This file does not support the mode: '{}'".format(flag) raise NotImplementedError(m) yield if flag in ['wb', 'ab']: self.flush() self.fp.close() def flush(self): """Flush all buffers and write the meta file.""" self.fp.flush() self.serializer.dump(self.meta_dict, self.meta_file_path) def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): """ Append an item to the gulp. Parameters ---------- id_ : str The ID of the item meta_data: dict The meta-data associated with the item. frames: list of numpy arrays The frames of the item as a list of numpy dictionaries consisting of image pixel values. 
""" self._append_meta(id_, meta_data) self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality) def read_frames(self, id_, slice_=None): """ Read frames for a single item. Parameters ---------- id_: str The ID of the item slice_: slice or list of ints: A slice or list of indices with which to select frames. Returns ------- frames (int), meta(dict) The frames of the item as a list of numpy arrays consisting of image pixel values. And the metadata. """ frame_infos, meta_data = self._get_frame_infos(id_) slice_element = slice_ if slice_ is not None else slice(0, len(frame_infos)) def extract_frame(frame_info): self.fp.seek(frame_info.loc) record = self.fp.read(frame_info.length) img_str = record[:len(record)-frame_info.pad] img = self.jpeg_decoder(img_str) return img if isinstance(slice_element, (list, np.ndarray)): selected_frame_infos = [frame_infos[idx] for idx in slice_element] else: selected_frame_infos = frame_infos[slice_element] frames = [extract_frame(frame_info) for frame_info in selected_frame_infos] return frames, meta_data def iter_all(self, accepted_ids=None, shuffle=False): """ Iterate over all frames in the gulp. Parameters ---------- accepted_ids: list of str A filter for accepted ids. shuffle: bool Shuffle the items or not. Returns ------- iterator An iterator that yield a series of frames,meta tuples. See `read_frames` for details. """ ids = self.meta_dict.keys() if accepted_ids is not None: intersection = list(set(ids) & set(accepted_ids)) ids = [id_ for id_ in ids if id_ in intersection] if shuffle: ids = list(ids) np.random.shuffle(ids) with self.open('rb'): for id_ in ids: frames, meta = self.read_frames(id_) yield frames, meta class ChunkWriter(object): """Can write from an adapter to a gulp chunk. Parameters ---------- adapter: subclass of AbstractDatasetAdapter The adapter to get items from. """ def __init__(self, adapter): self.adapter = adapter def write_chunk(self, output_chunk, input_slice): """Write from an input slice in the adapter to an output chunk. Parameters ---------- output_chunk: GulpChunk The chunk to write to input_slice: slice The slice to use from the adapter. """ with output_chunk.open('wb'): for video in self.adapter.iter_data(input_slice): id_ = video['id'] meta_data = video['meta'] frames = video['frames'] if len(frames) > 0: output_chunk.append(id_, meta_data, frames, self.adapter.jpeg_encode_quality()) else: print("Failed to write video with id: {}; no frames" .format(id_)) def calculate_chunk_slices(items_per_chunk, num_items): """Calculate slices for indexing an adapter. Parameters ---------- items_per_chunk: int Approximate number of items per chunk. num_items: int Total number of items. Returns ------- list of slices """ assert items_per_chunk > 0 assert num_items > 0 return [slice(i, min(i + items_per_chunk, num_items)) for i in range(0, num_items, items_per_chunk)] class GulpIngestor(object): """Ingest items from an adapter into an gulp chunks. Parameters ---------- adapter: subclass of AbstractDatasetAdapter The adapter to ingest from. output_folder: str The folder/directory to write to. videos_per_chunk: int The total number of items per chunk. num_workers: int The level of parallelism. 
""" def __init__(self, adapter, output_folder, videos_per_chunk, num_workers): assert int(num_workers) > 0 self.adapter = adapter self.output_folder = output_folder self.videos_per_chunk = int(videos_per_chunk) self.num_workers = int(num_workers) def __call__(self): os.makedirs(self.output_folder, exist_ok=True) chunk_slices = calculate_chunk_slices(self.videos_per_chunk, len(self.adapter)) gulp_directory = GulpDirectory(self.output_folder) new_chunks = gulp_directory.new_chunks(len(chunk_slices)) chunk_writer = ChunkWriter(self.adapter) with ProcessPoolExecutor(max_workers=self.num_workers) as executor: result = executor.map(chunk_writer.write_chunk, new_chunks, chunk_slices) for r in tqdm(result, desc='Chunks finished', unit='chunk', dynamic_ncols=True, total=len(chunk_slices)): pass
iter_all
Iterate over all frames in the gulp. Parameters ---------- accepted_ids: list of str A filter for accepted ids. shuffle: bool Shuffle the items or not. Returns ------- iterator An iterator that yields a series of (frames, meta) tuples. See `read_frames` for details.
#!/usr/bin/env python import os import re import pickle import json import glob import numpy as np from abc import ABC, abstractmethod from concurrent.futures import ProcessPoolExecutor from contextlib import contextmanager from collections import namedtuple, OrderedDict from tqdm import tqdm from .utils import img_to_jpeg_bytes, jpeg_bytes_to_img, _DEFAULT_JPEG_QUALITY from pathlib import Path #from simplejpeg import is_jpeg def is_jpeg(data): """ Check whether a bytes object (or similar) contains JPEG (JFIF) data. Returns False for truncated files. Taken from simplejpeg.is_jpeg, but less strict because it doesn't check EOI, as most JPEG viewers don't really throw error for missing EOI. :param data: JPEG (JFIF) data :return: True if JPEG """ return data[:2] == b'\xFF\xD8' ImgInfo = namedtuple('ImgInfo', ['loc', 'pad', 'length']) class FileFormatException(Exception): pass class AbstractSerializer(ABC): # pragma: no cover @abstractmethod def load(self, file_name): pass @abstractmethod def dump(self, thing, file_name): pass class PickleSerializer(AbstractSerializer): def load(self, file_name): with open(file_name, 'rb') as file_pointer: return pickle.load(file_pointer) def dump(self, thing, file_name): with open(file_name, 'wb') as file_pointer: pickle.dump(thing, file_pointer) class JSONSerializer(AbstractSerializer): def load(self, file_name): with open(file_name, 'r') as file_pointer: return json.load(file_pointer, object_pairs_hook=OrderedDict) def dump(self, thing, file_name): with open(file_name, 'w') as file_pointer: json.dump(thing, file_pointer) pickle_serializer = PickleSerializer() json_serializer = JSONSerializer() def extract_input_for_getitem(element): if isinstance(element, tuple) and len(element) == 2: id_, slice_ = element elif isinstance(element, (int, str)): id_, slice_ = element, None else: raise TypeError("Undefined input type! id or (id, slice) expected") id_ = str(id_) return id_, slice_ class GulpDirectory(object): """ Represents a directory containing *.gulp and *.gmeta files. Parameters ---------- output_dir: str Path to the directory containing the files. jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns the desired decoded image format (e.g. np.ndarray) Attributes ---------- all_meta_dicts: list of dicts All meta dicts from all chunks as a list. chunk_lookup: dict: int -> str Mapping element id to chunk index. chunk_objs_lookup: dict: int -> GulpChunk Mapping element id to chunk index. merged_meta_dict: dict: id -> meta dict all meta dicts merged """ def __init__(self, output_dir, jpeg_decoder=jpeg_bytes_to_img): self.output_dir = output_dir self.jpeg_decoder = jpeg_decoder self.chunk_objs_lookup = OrderedDict(zip(self._chunk_ids(), self._chunks())) self.all_meta_dicts = [c.meta_dict for c in self.chunk_objs_lookup.values()] self.num_chunks = len(self.chunk_objs_lookup) self.chunk_lookup = {} for chunk_id, chunk in self.chunk_objs_lookup.items(): for id_ in chunk.meta_dict: self.chunk_lookup[id_] = chunk_id self.merged_meta_dict = {} for d in self.all_meta_dicts: for k in d.keys(): assert k not in self.merged_meta_dict,\ "Duplicate id detected {}".format(k) else: self.merged_meta_dict.update(d) def __iter__(self): return iter(self.chunk_objs_lookup.values()) def chunks(self): """ Return a generator over existing GulpChunk objects which are ready to be opened and read from. 
""" return self.__iter__() def _chunks(self): return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._existing_file_paths()) def new_chunks(self, total_new_chunks): """ Return a generator over freshly setup GulpChunk objects which are ready to be opened and written to. Parameters ---------- total_new_chunks: int The total number of new chunks to initialize. """ return ((GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._allocate_new_file_paths(total_new_chunks))) def __getitem__(self, element): id_, _ = extract_input_for_getitem(element) chunk_id = self.chunk_lookup[id_] gulp_chunk = self.chunk_objs_lookup[chunk_id] with gulp_chunk.open(): return gulp_chunk[element] def _find_existing_data_paths(self): return sorted(glob.glob(os.path.join(self.output_dir, 'data*.gulp'))) def _find_existing_meta_paths(self): return sorted(glob.glob(os.path.join(self.output_dir, 'meta*.gmeta'))) def _load_label_dict(self): return json.load(open(os.path.join(self.output_dir, 'label2idx.json'), 'rb')) def _existing_file_paths(self): data_paths = self._find_existing_data_paths() meta_paths = self._find_existing_meta_paths() assert len(data_paths) == len(meta_paths) return zip(data_paths, meta_paths) def _find_ids_from_paths(self, paths): return [int(re.findall(r'\d+', os.path.basename(p))[0]) for p in paths] def _chunk_ids(self): data_paths = self._find_existing_data_paths() meta_paths = self._find_existing_meta_paths() data_ids = self._find_ids_from_paths(data_paths) meta_ids = self._find_ids_from_paths(meta_paths) assert data_ids == meta_ids return data_ids def _next_chunk_id(self): existing_chunk_ids = self._chunk_ids() next_chunk_id = 0 if len(existing_chunk_ids) > 0: next_chunk_id = max([int(i) for i in existing_chunk_ids]) + 1 return next_chunk_id def _allocate_new_file_paths(self, total_new_chunks): next_chunk_id = self._next_chunk_id() return [self._initialize_filenames(i) for i in range(next_chunk_id, next_chunk_id + total_new_chunks)] def _initialize_filenames(self, chunk_id): data_file_path = os.path.join( self.output_dir, 'data_{}.gulp'.format(chunk_id)) meta_file_path = os.path.join( self.output_dir, 'meta_{}.gmeta'.format(chunk_id)) return data_file_path, meta_file_path class GulpChunk(object): """ Represents a gulp chunk on disk. Parameters ---------- data_file_path: str Path to the *.gulp file. meta_file_path: str Path to the *.gmeta file. serializer: subclass of AbstractSerializer The type of serializer to use. jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns the desired decoded image format (e.g. 
np.ndarray) """ def __init__(self, data_file_path, meta_file_path, serializer=json_serializer, jpeg_decoder=jpeg_bytes_to_img): self.jpeg_decoder = jpeg_decoder self.serializer = serializer self.data_file_path = data_file_path self.meta_file_path = meta_file_path self.meta_dict = self._get_or_create_dict() self._img_info = {} self.fp = None def __contains__(self, id_): return str(id_) in self.meta_dict def __getitem__(self, element): id_, slice_ = extract_input_for_getitem(element) return self.read_frames(id_, slice_) def __iter__(self): return self.iter_all() def _get_frame_infos(self, id_): id_ = str(id_) if id_ in self.meta_dict: return (self._get_or_create_img_info(id_), self._copy_meta_data(id_)) def _copy_meta_data(self, id_): return dict(self.meta_dict[id_]['meta_data'][0]) def _get_or_create_img_info(self, id_): if id_ not in self._img_info: self._img_info[id_] = [ImgInfo(*info) for info in self.meta_dict[id_]['frame_info']] return self._img_info[id_] def _get_or_create_dict(self): if os.path.exists(self.meta_file_path): return self.serializer.load(self.meta_file_path) else: return OrderedDict() @staticmethod def _default_factory(): return OrderedDict([('frame_info', []), ('meta_data', [])]) @staticmethod def _pad_image(number): return (4 - (number % 4)) % 4 def _append_meta(self, id_, meta_data): id_ = str(id_) if id_ not in self.meta_dict: # implements an OrderedDefaultDict self.meta_dict[id_] = self._default_factory() self.meta_dict[id_]['meta_data'].append(meta_data) def _write_frame(self, id_, image, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): loc = self.fp.tell() if isinstance(image, (str, Path)): # If image is a string or pathlib Path, assume that it is a path to a jpeg file # and add it directly without decoding and encoding it. with open(str(image), 'rb') as image_file: img_str = image_file.read() if not is_jpeg(img_str): raise FileFormatException(f'Image file from path {image} does not appear to be a JPEG file.') else: # np.array img_str = img_to_jpeg_bytes(image, jpeg_encode_quality) assert len(img_str) > 0 pad = self._pad_image(len(img_str)) record = img_str.ljust(len(img_str) + pad, b'\0') assert len(record) > 0 img_info = ImgInfo(loc=loc, length=len(record), pad=pad) id_ = str(id_) if id_ not in self.meta_dict: # implements an OrderedDefaultDict self.meta_dict[id_] = self._default_factory() self.meta_dict[id_]['frame_info'].append(img_info) self.fp.write(record) def _write_frames(self, id_, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): for frame in frames: self._write_frame(id_, frame, jpeg_encode_quality) @contextmanager def open(self, flag='rb'): """Open the gulp chunk for reading. Parameters ---------- flag: str 'rb': Read binary 'wb': Write binary 'ab': Append to binary Notes ----- Works as a context manager but returns None. """ if flag in ['wb', 'rb', 'ab']: self.fp = open(self.data_file_path, flag) else: m = "This file does not support the mode: '{}'".format(flag) raise NotImplementedError(m) yield if flag in ['wb', 'ab']: self.flush() self.fp.close() def flush(self): """Flush all buffers and write the meta file.""" self.fp.flush() self.serializer.dump(self.meta_dict, self.meta_file_path) def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): """ Append an item to the gulp. Parameters ---------- id_ : str The ID of the item meta_data: dict The meta-data associated with the item. frames: list of numpy arrays The frames of the item as a list of numpy dictionaries consisting of image pixel values. 
""" self._append_meta(id_, meta_data) self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality) def read_frames(self, id_, slice_=None): """ Read frames for a single item. Parameters ---------- id_: str The ID of the item slice_: slice or list of ints: A slice or list of indices with which to select frames. Returns ------- frames (int), meta(dict) The frames of the item as a list of numpy arrays consisting of image pixel values. And the metadata. """ frame_infos, meta_data = self._get_frame_infos(id_) slice_element = slice_ if slice_ is not None else slice(0, len(frame_infos)) def extract_frame(frame_info): self.fp.seek(frame_info.loc) record = self.fp.read(frame_info.length) img_str = record[:len(record)-frame_info.pad] img = self.jpeg_decoder(img_str) return img if isinstance(slice_element, (list, np.ndarray)): selected_frame_infos = [frame_infos[idx] for idx in slice_element] else: selected_frame_infos = frame_infos[slice_element] frames = [extract_frame(frame_info) for frame_info in selected_frame_infos] return frames, meta_data # MASKED: iter_all function (lines 393-423) class ChunkWriter(object): """Can write from an adapter to a gulp chunk. Parameters ---------- adapter: subclass of AbstractDatasetAdapter The adapter to get items from. """ def __init__(self, adapter): self.adapter = adapter def write_chunk(self, output_chunk, input_slice): """Write from an input slice in the adapter to an output chunk. Parameters ---------- output_chunk: GulpChunk The chunk to write to input_slice: slice The slice to use from the adapter. """ with output_chunk.open('wb'): for video in self.adapter.iter_data(input_slice): id_ = video['id'] meta_data = video['meta'] frames = video['frames'] if len(frames) > 0: output_chunk.append(id_, meta_data, frames, self.adapter.jpeg_encode_quality()) else: print("Failed to write video with id: {}; no frames" .format(id_)) def calculate_chunk_slices(items_per_chunk, num_items): """Calculate slices for indexing an adapter. Parameters ---------- items_per_chunk: int Approximate number of items per chunk. num_items: int Total number of items. Returns ------- list of slices """ assert items_per_chunk > 0 assert num_items > 0 return [slice(i, min(i + items_per_chunk, num_items)) for i in range(0, num_items, items_per_chunk)] class GulpIngestor(object): """Ingest items from an adapter into an gulp chunks. Parameters ---------- adapter: subclass of AbstractDatasetAdapter The adapter to ingest from. output_folder: str The folder/directory to write to. videos_per_chunk: int The total number of items per chunk. num_workers: int The level of parallelism. """ def __init__(self, adapter, output_folder, videos_per_chunk, num_workers): assert int(num_workers) > 0 self.adapter = adapter self.output_folder = output_folder self.videos_per_chunk = int(videos_per_chunk) self.num_workers = int(num_workers) def __call__(self): os.makedirs(self.output_folder, exist_ok=True) chunk_slices = calculate_chunk_slices(self.videos_per_chunk, len(self.adapter)) gulp_directory = GulpDirectory(self.output_folder) new_chunks = gulp_directory.new_chunks(len(chunk_slices)) chunk_writer = ChunkWriter(self.adapter) with ProcessPoolExecutor(max_workers=self.num_workers) as executor: result = executor.map(chunk_writer.write_chunk, new_chunks, chunk_slices) for r in tqdm(result, desc='Chunks finished', unit='chunk', dynamic_ncols=True, total=len(chunk_slices)): pass
def iter_all(self, accepted_ids=None, shuffle=False): """ Iterate over all frames in the gulp. Parameters ---------- accepted_ids: list of str A filter for accepted ids. shuffle: bool Shuffle the items or not. Returns ------- iterator An iterator that yields a series of (frames, meta) tuples. See `read_frames` for details. """ ids = self.meta_dict.keys() if accepted_ids is not None: intersection = list(set(ids) & set(accepted_ids)) ids = [id_ for id_ in ids if id_ in intersection] if shuffle: ids = list(ids) np.random.shuffle(ids) with self.open('rb'): for id_ in ids: frames, meta = self.read_frames(id_) yield frames, meta
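A usage sketch for iter_all; the chunk file names and ids below are hypothetical:

chunk = GulpChunk('data_0.gulp', 'meta_0.gmeta')
# Visit only the listed ids, in random order; iter_all opens the data file itself.
for frames, meta in chunk.iter_all(accepted_ids=['3', '7'], shuffle=True):
    print(len(frames), meta)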
393
423
#!/usr/bin/env python import os import re import pickle import json import glob import numpy as np from abc import ABC, abstractmethod from concurrent.futures import ProcessPoolExecutor from contextlib import contextmanager from collections import namedtuple, OrderedDict from tqdm import tqdm from .utils import img_to_jpeg_bytes, jpeg_bytes_to_img, _DEFAULT_JPEG_QUALITY from pathlib import Path #from simplejpeg import is_jpeg def is_jpeg(data): """ Check whether a bytes object (or similar) contains JPEG (JFIF) data. Returns False for truncated files. Taken from simplejpeg.is_jpeg, but less strict because it doesn't check EOI, as most JPEG viewers don't really throw error for missing EOI. :param data: JPEG (JFIF) data :return: True if JPEG """ return data[:2] == b'\xFF\xD8' ImgInfo = namedtuple('ImgInfo', ['loc', 'pad', 'length']) class FileFormatException(Exception): pass class AbstractSerializer(ABC): # pragma: no cover @abstractmethod def load(self, file_name): pass @abstractmethod def dump(self, thing, file_name): pass class PickleSerializer(AbstractSerializer): def load(self, file_name): with open(file_name, 'rb') as file_pointer: return pickle.load(file_pointer) def dump(self, thing, file_name): with open(file_name, 'wb') as file_pointer: pickle.dump(thing, file_pointer) class JSONSerializer(AbstractSerializer): def load(self, file_name): with open(file_name, 'r') as file_pointer: return json.load(file_pointer, object_pairs_hook=OrderedDict) def dump(self, thing, file_name): with open(file_name, 'w') as file_pointer: json.dump(thing, file_pointer) pickle_serializer = PickleSerializer() json_serializer = JSONSerializer() def extract_input_for_getitem(element): if isinstance(element, tuple) and len(element) == 2: id_, slice_ = element elif isinstance(element, (int, str)): id_, slice_ = element, None else: raise TypeError("Undefined input type! id or (id, slice) expected") id_ = str(id_) return id_, slice_ class GulpDirectory(object): """ Represents a directory containing *.gulp and *.gmeta files. Parameters ---------- output_dir: str Path to the directory containing the files. jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns the desired decoded image format (e.g. np.ndarray) Attributes ---------- all_meta_dicts: list of dicts All meta dicts from all chunks as a list. chunk_lookup: dict: int -> str Mapping element id to chunk index. chunk_objs_lookup: dict: int -> GulpChunk Mapping element id to chunk index. merged_meta_dict: dict: id -> meta dict all meta dicts merged """ def __init__(self, output_dir, jpeg_decoder=jpeg_bytes_to_img): self.output_dir = output_dir self.jpeg_decoder = jpeg_decoder self.chunk_objs_lookup = OrderedDict(zip(self._chunk_ids(), self._chunks())) self.all_meta_dicts = [c.meta_dict for c in self.chunk_objs_lookup.values()] self.num_chunks = len(self.chunk_objs_lookup) self.chunk_lookup = {} for chunk_id, chunk in self.chunk_objs_lookup.items(): for id_ in chunk.meta_dict: self.chunk_lookup[id_] = chunk_id self.merged_meta_dict = {} for d in self.all_meta_dicts: for k in d.keys(): assert k not in self.merged_meta_dict,\ "Duplicate id detected {}".format(k) else: self.merged_meta_dict.update(d) def __iter__(self): return iter(self.chunk_objs_lookup.values()) def chunks(self): """ Return a generator over existing GulpChunk objects which are ready to be opened and read from. 
""" return self.__iter__() def _chunks(self): return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._existing_file_paths()) def new_chunks(self, total_new_chunks): """ Return a generator over freshly setup GulpChunk objects which are ready to be opened and written to. Parameters ---------- total_new_chunks: int The total number of new chunks to initialize. """ return ((GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._allocate_new_file_paths(total_new_chunks))) def __getitem__(self, element): id_, _ = extract_input_for_getitem(element) chunk_id = self.chunk_lookup[id_] gulp_chunk = self.chunk_objs_lookup[chunk_id] with gulp_chunk.open(): return gulp_chunk[element] def _find_existing_data_paths(self): return sorted(glob.glob(os.path.join(self.output_dir, 'data*.gulp'))) def _find_existing_meta_paths(self): return sorted(glob.glob(os.path.join(self.output_dir, 'meta*.gmeta'))) def _load_label_dict(self): return json.load(open(os.path.join(self.output_dir, 'label2idx.json'), 'rb')) def _existing_file_paths(self): data_paths = self._find_existing_data_paths() meta_paths = self._find_existing_meta_paths() assert len(data_paths) == len(meta_paths) return zip(data_paths, meta_paths) def _find_ids_from_paths(self, paths): return [int(re.findall(r'\d+', os.path.basename(p))[0]) for p in paths] def _chunk_ids(self): data_paths = self._find_existing_data_paths() meta_paths = self._find_existing_meta_paths() data_ids = self._find_ids_from_paths(data_paths) meta_ids = self._find_ids_from_paths(meta_paths) assert data_ids == meta_ids return data_ids def _next_chunk_id(self): existing_chunk_ids = self._chunk_ids() next_chunk_id = 0 if len(existing_chunk_ids) > 0: next_chunk_id = max([int(i) for i in existing_chunk_ids]) + 1 return next_chunk_id def _allocate_new_file_paths(self, total_new_chunks): next_chunk_id = self._next_chunk_id() return [self._initialize_filenames(i) for i in range(next_chunk_id, next_chunk_id + total_new_chunks)] def _initialize_filenames(self, chunk_id): data_file_path = os.path.join( self.output_dir, 'data_{}.gulp'.format(chunk_id)) meta_file_path = os.path.join( self.output_dir, 'meta_{}.gmeta'.format(chunk_id)) return data_file_path, meta_file_path class GulpChunk(object): """ Represents a gulp chunk on disk. Parameters ---------- data_file_path: str Path to the *.gulp file. meta_file_path: str Path to the *.gmeta file. serializer: subclass of AbstractSerializer The type of serializer to use. jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns the desired decoded image format (e.g. 
np.ndarray) """ def __init__(self, data_file_path, meta_file_path, serializer=json_serializer, jpeg_decoder=jpeg_bytes_to_img): self.jpeg_decoder = jpeg_decoder self.serializer = serializer self.data_file_path = data_file_path self.meta_file_path = meta_file_path self.meta_dict = self._get_or_create_dict() self._img_info = {} self.fp = None def __contains__(self, id_): return str(id_) in self.meta_dict def __getitem__(self, element): id_, slice_ = extract_input_for_getitem(element) return self.read_frames(id_, slice_) def __iter__(self): return self.iter_all() def _get_frame_infos(self, id_): id_ = str(id_) if id_ in self.meta_dict: return (self._get_or_create_img_info(id_), self._copy_meta_data(id_)) def _copy_meta_data(self, id_): return dict(self.meta_dict[id_]['meta_data'][0]) def _get_or_create_img_info(self, id_): if id_ not in self._img_info: self._img_info[id_] = [ImgInfo(*info) for info in self.meta_dict[id_]['frame_info']] return self._img_info[id_] def _get_or_create_dict(self): if os.path.exists(self.meta_file_path): return self.serializer.load(self.meta_file_path) else: return OrderedDict() @staticmethod def _default_factory(): return OrderedDict([('frame_info', []), ('meta_data', [])]) @staticmethod def _pad_image(number): return (4 - (number % 4)) % 4 def _append_meta(self, id_, meta_data): id_ = str(id_) if id_ not in self.meta_dict: # implements an OrderedDefaultDict self.meta_dict[id_] = self._default_factory() self.meta_dict[id_]['meta_data'].append(meta_data) def _write_frame(self, id_, image, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): loc = self.fp.tell() if isinstance(image, (str, Path)): # If image is a string or pathlib Path, assume that it is a path to a jpeg file # and add it directly without decoding and encoding it. with open(str(image), 'rb') as image_file: img_str = image_file.read() if not is_jpeg(img_str): raise FileFormatException(f'Image file from path {image} does not appear to be a JPEG file.') else: # np.array img_str = img_to_jpeg_bytes(image, jpeg_encode_quality) assert len(img_str) > 0 pad = self._pad_image(len(img_str)) record = img_str.ljust(len(img_str) + pad, b'\0') assert len(record) > 0 img_info = ImgInfo(loc=loc, length=len(record), pad=pad) id_ = str(id_) if id_ not in self.meta_dict: # implements an OrderedDefaultDict self.meta_dict[id_] = self._default_factory() self.meta_dict[id_]['frame_info'].append(img_info) self.fp.write(record) def _write_frames(self, id_, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): for frame in frames: self._write_frame(id_, frame, jpeg_encode_quality) @contextmanager def open(self, flag='rb'): """Open the gulp chunk for reading. Parameters ---------- flag: str 'rb': Read binary 'wb': Write binary 'ab': Append to binary Notes ----- Works as a context manager but returns None. """ if flag in ['wb', 'rb', 'ab']: self.fp = open(self.data_file_path, flag) else: m = "This file does not support the mode: '{}'".format(flag) raise NotImplementedError(m) yield if flag in ['wb', 'ab']: self.flush() self.fp.close() def flush(self): """Flush all buffers and write the meta file.""" self.fp.flush() self.serializer.dump(self.meta_dict, self.meta_file_path) def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY): """ Append an item to the gulp. Parameters ---------- id_ : str The ID of the item meta_data: dict The meta-data associated with the item. frames: list of numpy arrays The frames of the item as a list of numpy dictionaries consisting of image pixel values. 
""" self._append_meta(id_, meta_data) self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality) def read_frames(self, id_, slice_=None): """ Read frames for a single item. Parameters ---------- id_: str The ID of the item slice_: slice or list of ints: A slice or list of indices with which to select frames. Returns ------- frames (int), meta(dict) The frames of the item as a list of numpy arrays consisting of image pixel values. And the metadata. """ frame_infos, meta_data = self._get_frame_infos(id_) slice_element = slice_ if slice_ is not None else slice(0, len(frame_infos)) def extract_frame(frame_info): self.fp.seek(frame_info.loc) record = self.fp.read(frame_info.length) img_str = record[:len(record)-frame_info.pad] img = self.jpeg_decoder(img_str) return img if isinstance(slice_element, (list, np.ndarray)): selected_frame_infos = [frame_infos[idx] for idx in slice_element] else: selected_frame_infos = frame_infos[slice_element] frames = [extract_frame(frame_info) for frame_info in selected_frame_infos] return frames, meta_data def iter_all(self, accepted_ids=None, shuffle=False): """ Iterate over all frames in the gulp. Parameters ---------- accepted_ids: list of str A filter for accepted ids. shuffle: bool Shuffle the items or not. Returns ------- iterator An iterator that yield a series of frames,meta tuples. See `read_frames` for details. """ ids = self.meta_dict.keys() if accepted_ids is not None: intersection = list(set(ids) & set(accepted_ids)) ids = [id_ for id_ in ids if id_ in intersection] if shuffle: ids = list(ids) np.random.shuffle(ids) with self.open('rb'): for id_ in ids: frames, meta = self.read_frames(id_) yield frames, meta class ChunkWriter(object): """Can write from an adapter to a gulp chunk. Parameters ---------- adapter: subclass of AbstractDatasetAdapter The adapter to get items from. """ def __init__(self, adapter): self.adapter = adapter def write_chunk(self, output_chunk, input_slice): """Write from an input slice in the adapter to an output chunk. Parameters ---------- output_chunk: GulpChunk The chunk to write to input_slice: slice The slice to use from the adapter. """ with output_chunk.open('wb'): for video in self.adapter.iter_data(input_slice): id_ = video['id'] meta_data = video['meta'] frames = video['frames'] if len(frames) > 0: output_chunk.append(id_, meta_data, frames, self.adapter.jpeg_encode_quality()) else: print("Failed to write video with id: {}; no frames" .format(id_)) def calculate_chunk_slices(items_per_chunk, num_items): """Calculate slices for indexing an adapter. Parameters ---------- items_per_chunk: int Approximate number of items per chunk. num_items: int Total number of items. Returns ------- list of slices """ assert items_per_chunk > 0 assert num_items > 0 return [slice(i, min(i + items_per_chunk, num_items)) for i in range(0, num_items, items_per_chunk)] class GulpIngestor(object): """Ingest items from an adapter into an gulp chunks. Parameters ---------- adapter: subclass of AbstractDatasetAdapter The adapter to ingest from. output_folder: str The folder/directory to write to. videos_per_chunk: int The total number of items per chunk. num_workers: int The level of parallelism. 
""" def __init__(self, adapter, output_folder, videos_per_chunk, num_workers): assert int(num_workers) > 0 self.adapter = adapter self.output_folder = output_folder self.videos_per_chunk = int(videos_per_chunk) self.num_workers = int(num_workers) def __call__(self): os.makedirs(self.output_folder, exist_ok=True) chunk_slices = calculate_chunk_slices(self.videos_per_chunk, len(self.adapter)) gulp_directory = GulpDirectory(self.output_folder) new_chunks = gulp_directory.new_chunks(len(chunk_slices)) chunk_writer = ChunkWriter(self.adapter) with ProcessPoolExecutor(max_workers=self.num_workers) as executor: result = executor.map(chunk_writer.write_chunk, new_chunks, chunk_slices) for r in tqdm(result, desc='Chunks finished', unit='chunk', dynamic_ncols=True, total=len(chunk_slices)): pass
__init__
Constructs an NsynthConfig. Args: gansynth_subset: bool, whether to use the subset of the dataset introduced in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses acoustic-only instrument sources and limits the pitches to the interval [24, 84]. The train and test splits are also modified so that instruments (but not specific notes) overlap between them. See https://arxiv.org/abs/1902.08710 for more details. estimate_f0_and_loudness: bool, whether to estimate fundamental frequency (F0) and loudness for the audio (at 250 Hz) and add them to the set of features. **kwargs: keyword arguments forwarded to super.
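Purely to illustrate the pattern this docstring describes (the actual implementation is masked in the record below), a tfds BuilderConfig subclass typically stores the flags and derives a unique config name from them. A minimal sketch under that assumption, with a hypothetical class name:

import tensorflow_datasets.public_api as tfds


class SketchConfig(tfds.core.BuilderConfig):  # hypothetical, not the masked code
    def __init__(self, gansynth_subset=False, estimate_f0_and_loudness=False, **kwargs):
        # Derive a distinct config name from the enabled flags.
        name_parts = ['gansynth_subset' if gansynth_subset else 'full']
        if estimate_f0_and_loudness:
            name_parts.append('f0_and_loudness')
        super(SketchConfig, self).__init__(name='.'.join(name_parts), **kwargs)
        self.gansynth_subset = gansynth_subset
        self.estimate_f0_and_loudness = estimate_f0_and_loudness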
# coding=utf-8 # Copyright 2019 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """NSynth Dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import csv import os import numpy as np import tensorflow as tf import tensorflow_datasets.public_api as tfds _DESCRIPTION = """\ The NSynth Dataset is an audio dataset containing ~300k musical notes, each with a unique pitch, timbre, and envelope. Each note is annotated with three additional pieces of information based on a combination of human evaluation and heuristic algorithms: Source, Family, and Qualities. """ _FULL_DESCRIPTION = """\ Full NSynth Dataset is split into train, valid, and test sets, with no instruments overlapping between the train set and the valid/test sets. """ _GANSYNTH_DESCRIPTION = """\ NSynth Dataset limited to acoustic instruments in the MIDI pitch interval [24, 84]. Uses alternate splits that have overlap in instruments (but not exact notes) between the train set and valid/test sets. This variant was originally introduced in the ICLR 2019 GANSynth paper (https://arxiv.org/abs/1902.08710). """ _F0_AND_LOUDNESS_ADDENDUM = """\ This version additionally contains estimates for F0 using CREPE (Kim et al., 2018) and A-weighted perceptual loudness. Both signals are provided at a frame rate of 250Hz. 
""" # From http://proceedings.mlr.press/v70/engel17a.html _CITATION = """\ @InProceedings{pmlr-v70-engel17a, title = {Neural Audio Synthesis of Musical Notes with {W}ave{N}et Autoencoders}, author = {Jesse Engel and Cinjon Resnick and Adam Roberts and Sander Dieleman and Mohammad Norouzi and Douglas Eck and Karen Simonyan}, booktitle = {Proceedings of the 34th International Conference on Machine Learning}, pages = {1068--1077}, year = {2017}, editor = {Doina Precup and Yee Whye Teh}, volume = {70}, series = {Proceedings of Machine Learning Research}, address = {International Convention Centre, Sydney, Australia}, month = {06--11 Aug}, publisher = {PMLR}, pdf = {http://proceedings.mlr.press/v70/engel17a/engel17a.pdf}, url = {http://proceedings.mlr.press/v70/engel17a.html}, } """ _NUM_SECS = 4 _AUDIO_RATE = 16000 # 16 kHz _F0_AND_LOUDNESS_RATE = 250 # 250 Hz _INSTRUMENT_FAMILIES = [ "bass", "brass", "flute", "guitar", "keyboard", "mallet", "organ", "reed", "string", "synth_lead", "vocal"] _INSTRUMENT_SOURCES = ["acoustic", "electronic", "synthetic"] _QUALITIES = [ "bright", "dark", "distortion", "fast_decay", "long_release", "multiphonic", "nonlinear_env", "percussive", "reverb", "tempo-synced"] _BASE_DOWNLOAD_PATH = "http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-" _SPLITS = ["train", "valid", "test"] _SPLIT_SHARDS = { "train": 512, "valid": 32, "test": 8, } class NsynthConfig(tfds.core.BuilderConfig): """BuilderConfig for NSynth Dataset.""" # MASKED: __init__ function (lines 107-138) class Nsynth(tfds.core.BeamBasedBuilder): """A large-scale and high-quality dataset of annotated musical notes.""" BUILDER_CONFIGS = [ NsynthConfig(description=_FULL_DESCRIPTION), NsynthConfig( gansynth_subset=True, description=_GANSYNTH_DESCRIPTION), NsynthConfig( gansynth_subset=True, estimate_f0_and_loudness=True, description=_GANSYNTH_DESCRIPTION + _F0_AND_LOUDNESS_ADDENDUM), ] def _info(self): features = { "id": tf.string, "audio": tfds.features.Tensor( shape=(_AUDIO_RATE * _NUM_SECS,), dtype=tf.float32), "pitch": tfds.features.ClassLabel(num_classes=128), "velocity": tfds.features.ClassLabel(num_classes=128), "instrument": { # We read the list of labels in _split_generators. 
"label": tfds.features.ClassLabel(num_classes=1006), "family": tfds.features.ClassLabel(names=_INSTRUMENT_FAMILIES), "source": tfds.features.ClassLabel(names=_INSTRUMENT_SOURCES), }, "qualities": {quality: tf.bool for quality in _QUALITIES}, } if self.builder_config.estimate_f0_and_loudness: f0_and_ld_shape = (_F0_AND_LOUDNESS_RATE * _NUM_SECS + 1,) features["f0"] = { "hz": tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32), "midi": tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32), "confidence": tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32) } features["loudness"] = { "db": tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32) } return tfds.core.DatasetInfo( builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict(features), homepage="https://g.co/magenta/nsynth-dataset", citation=_CITATION, metadata=tfds.core.BeamMetadataDict(), ) def _split_generators(self, dl_manager): """Returns splits.""" dl_urls = {} dl_urls["examples"] = { split: _BASE_DOWNLOAD_PATH + "%s.tfrecord.tar" % split for split in _SPLITS } dl_urls["instrument_labels"] = ( _BASE_DOWNLOAD_PATH + "instrument_labels.txt") if self.builder_config.gansynth_subset: dl_urls["gansynth_splits"] = ( _BASE_DOWNLOAD_PATH + "gansynth_splits.csv") dl_paths = dl_manager.download_and_extract(dl_urls) with tf.io.gfile.GFile(dl_paths["instrument_labels"]) as f: instrument_labels = f.read().strip().splitlines() self.info.features["instrument"]["label"].names = instrument_labels split_ids = {s: set() for s in _SPLITS} split_dirs = {s: [dl_paths["examples"][s]] for s in _SPLITS} if self.builder_config.gansynth_subset: # Generator needs to see all original splits for each new split. split_dirs = {s: dl_paths["examples"].values() for s in _SPLITS} with tf.io.gfile.GFile(dl_paths["gansynth_splits"]) as f: reader = csv.DictReader(f) for row in reader: split_ids[row["split"]].add(row["id"]) return [ tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension name=split, num_shards=_SPLIT_SHARDS[split], gen_kwargs={ "tfrecord_dirs": split_dirs[split], "ids": split_ids[split], "split": split, }) for split in _SPLITS ] def _build_pcollection(self, pipeline, tfrecord_dirs, ids, split): """Build PCollection of examples for split.""" beam = tfds.core.lazy_imports.apache_beam def _emit_base_example(ex): """Maps an input example to a TFDS example.""" beam.metrics.Metrics.counter(split, "base-examples").inc() features = ex.features.feature return { "id": features["note_str"].bytes_list.value[0], "audio": np.array(features["audio"].float_list.value, dtype=np.float32), "pitch": features["pitch"].int64_list.value[0], "velocity": features["velocity"].int64_list.value[0], "instrument": { "label": tf.compat.as_text( features["instrument_str"].bytes_list.value[0]), "family": tf.compat.as_text( features["instrument_family_str"].bytes_list.value[0]), "source": tf.compat.as_text( features["instrument_source_str"].bytes_list.value[0]) }, "qualities": { q: features["qualities"].int64_list.value[i] for (i, q) in enumerate(_QUALITIES) } } def _in_split(ex, split_ids): if not split_ids or tf.compat.as_text(ex["id"]) in split_ids: beam.metrics.Metrics.counter(split, "in-split").inc() return True return False def _estimate_f0(ex): """Estimate the fundamental frequency using CREPE and add to example.""" ex = ex.copy() beam.metrics.Metrics.counter(split, "estimate-f0").inc() _, f0_hz, f0_confidence, _ = tfds.core.lazy_imports.crepe.predict( ex["audio"], sr=_AUDIO_RATE, viterbi=True, step_size=1000 / 
_F0_AND_LOUDNESS_RATE, verbose=0) f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz) # Set -infs introduced by hz_to_midi to 0. f0_midi[f0_midi == -np.inf] = 0 # Set nans to 0 in confidence. f0_confidence = np.nan_to_num(f0_confidence) ex["f0"] = { "hz": f0_hz.astype(np.float32), "midi": f0_midi.astype(np.float32), "confidence": f0_confidence.astype(np.float32), } return ex def _compute_loudness(ex): """Compute loudness and add to example.""" ex = ex.copy() beam.metrics.Metrics.counter(split, "compute-loudness").inc() librosa = tfds.core.lazy_imports.librosa n_fft = 2048 amin = 1e-15 top_db = 200.0 stft = librosa.stft( ex["audio"], n_fft=n_fft, hop_length=int(_AUDIO_RATE // _F0_AND_LOUDNESS_RATE)) loudness_db = librosa.perceptual_weighting( np.abs(stft)**2, librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft), amin=amin, top_db=top_db) # Average across freq in linear scale. mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0) mean_loudness_db = librosa.amplitude_to_db( mean_loudness_amp, amin=amin, top_db=top_db) ex["loudness"] = {"db": mean_loudness_db.astype(np.float32)} return ex examples = ( pipeline | beam.Create([os.path.join(dir_, "*") for dir_ in tfrecord_dirs]) | beam.io.tfrecordio.ReadAllFromTFRecord( coder=beam.coders.ProtoCoder(tf.train.Example)) | beam.Map(_emit_base_example) | beam.Filter(_in_split, split_ids=ids)) if self.builder_config.estimate_f0_and_loudness: examples = ( examples | beam.Reshuffle() | beam.Map(_estimate_f0) | beam.Map(_compute_loudness)) if split == tfds.Split.TRAIN: # Output mean and variance of loudness for TRAIN split. loudness = examples | beam.Map(lambda x: np.mean(x["loudness"]["db"])) loudness_mean = ( loudness | "loudness_mean" >> beam.combiners.Mean.Globally()) loudness_variance = ( loudness | beam.Map(lambda ld, ld_mean: (ld - ld_mean)**2, ld_mean=beam.pvalue.AsSingleton(loudness_mean)) | "loudness_variance" >> beam.combiners.Mean.Globally()) self.info.metadata["loudness_db_mean"] = loudness_mean self.info.metadata["loudness_db_variance"] = loudness_variance return examples
def __init__(self, gansynth_subset=False, estimate_f0_and_loudness=False, **kwargs): """Constructs a NsynthConfig. Args: gansynth_subset: bool, whether to use the subset of the dataset introduced in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses acoustic-only instrument sources and limits the pitches to the interval [24, 84]. The train and test splits are also modified so that instruments (but not specific notes) overlap between them. See https://arxiv.org/abs/1902.08710 for more details. estimate_f0_and_loudness: bool, whether to estimate fundamental frequency (F0) and loudness for the audio (at 250 Hz) and add them to the set of features. **kwargs: keyword arguments forwarded to super. """ name_parts = [] if gansynth_subset: name_parts.append("gansynth_subset") else: name_parts.append("full") if estimate_f0_and_loudness: name_parts.append("f0_and_loudness") super(NsynthConfig, self).__init__( name=".".join(name_parts), version=tfds.core.Version( "1.1.0", experiments={tfds.core.Experiment.S3: False}), **kwargs) self.gansynth_subset = gansynth_subset self.estimate_f0_and_loudness = estimate_f0_and_loudness
107
138
# coding=utf-8 # Copyright 2019 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """NSynth Dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import csv import os import numpy as np import tensorflow as tf import tensorflow_datasets.public_api as tfds _DESCRIPTION = """\ The NSynth Dataset is an audio dataset containing ~300k musical notes, each with a unique pitch, timbre, and envelope. Each note is annotated with three additional pieces of information based on a combination of human evaluation and heuristic algorithms: Source, Family, and Qualities. """ _FULL_DESCRIPTION = """\ Full NSynth Dataset is split into train, valid, and test sets, with no instruments overlapping between the train set and the valid/test sets. """ _GANSYNTH_DESCRIPTION = """\ NSynth Dataset limited to acoustic instruments in the MIDI pitch interval [24, 84]. Uses alternate splits that have overlap in instruments (but not exact notes) between the train set and valid/test sets. This variant was originally introduced in the ICLR 2019 GANSynth paper (https://arxiv.org/abs/1902.08710). """ _F0_AND_LOUDNESS_ADDENDUM = """\ This version additionally contains estimates for F0 using CREPE (Kim et al., 2018) and A-weighted perceptual loudness. Both signals are provided at a frame rate of 250Hz. """ # From http://proceedings.mlr.press/v70/engel17a.html _CITATION = """\ @InProceedings{pmlr-v70-engel17a, title = {Neural Audio Synthesis of Musical Notes with {W}ave{N}et Autoencoders}, author = {Jesse Engel and Cinjon Resnick and Adam Roberts and Sander Dieleman and Mohammad Norouzi and Douglas Eck and Karen Simonyan}, booktitle = {Proceedings of the 34th International Conference on Machine Learning}, pages = {1068--1077}, year = {2017}, editor = {Doina Precup and Yee Whye Teh}, volume = {70}, series = {Proceedings of Machine Learning Research}, address = {International Convention Centre, Sydney, Australia}, month = {06--11 Aug}, publisher = {PMLR}, pdf = {http://proceedings.mlr.press/v70/engel17a/engel17a.pdf}, url = {http://proceedings.mlr.press/v70/engel17a.html}, } """ _NUM_SECS = 4 _AUDIO_RATE = 16000 # 16 kHz _F0_AND_LOUDNESS_RATE = 250 # 250 Hz _INSTRUMENT_FAMILIES = [ "bass", "brass", "flute", "guitar", "keyboard", "mallet", "organ", "reed", "string", "synth_lead", "vocal"] _INSTRUMENT_SOURCES = ["acoustic", "electronic", "synthetic"] _QUALITIES = [ "bright", "dark", "distortion", "fast_decay", "long_release", "multiphonic", "nonlinear_env", "percussive", "reverb", "tempo-synced"] _BASE_DOWNLOAD_PATH = "http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-" _SPLITS = ["train", "valid", "test"] _SPLIT_SHARDS = { "train": 512, "valid": 32, "test": 8, } class NsynthConfig(tfds.core.BuilderConfig): """BuilderConfig for NSynth Dataset.""" def __init__(self, gansynth_subset=False, estimate_f0_and_loudness=False, **kwargs): """Constructs a NsynthConfig. 
Args: gansynth_subset: bool, whether to use the subset of the dataset introduced in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses acoustic-only instrument sources and limits the pitches to the interval [24, 84]. The train and test splits are also modified so that instruments (but not specific notes) overlap between them. See https://arxiv.org/abs/1902.08710 for more details. estimate_f0_and_loudness: bool, whether to estimate fundamental frequency (F0) and loudness for the audio (at 250 Hz) and add them to the set of features. **kwargs: keyword arguments forwarded to super. """ name_parts = [] if gansynth_subset: name_parts.append("gansynth_subset") else: name_parts.append("full") if estimate_f0_and_loudness: name_parts.append("f0_and_loudness") super(NsynthConfig, self).__init__( name=".".join(name_parts), version=tfds.core.Version( "1.1.0", experiments={tfds.core.Experiment.S3: False}), **kwargs) self.gansynth_subset = gansynth_subset self.estimate_f0_and_loudness = estimate_f0_and_loudness class Nsynth(tfds.core.BeamBasedBuilder): """A large-scale and high-quality dataset of annotated musical notes.""" BUILDER_CONFIGS = [ NsynthConfig(description=_FULL_DESCRIPTION), NsynthConfig( gansynth_subset=True, description=_GANSYNTH_DESCRIPTION), NsynthConfig( gansynth_subset=True, estimate_f0_and_loudness=True, description=_GANSYNTH_DESCRIPTION + _F0_AND_LOUDNESS_ADDENDUM), ] def _info(self): features = { "id": tf.string, "audio": tfds.features.Tensor( shape=(_AUDIO_RATE * _NUM_SECS,), dtype=tf.float32), "pitch": tfds.features.ClassLabel(num_classes=128), "velocity": tfds.features.ClassLabel(num_classes=128), "instrument": { # We read the list of labels in _split_generators. "label": tfds.features.ClassLabel(num_classes=1006), "family": tfds.features.ClassLabel(names=_INSTRUMENT_FAMILIES), "source": tfds.features.ClassLabel(names=_INSTRUMENT_SOURCES), }, "qualities": {quality: tf.bool for quality in _QUALITIES}, } if self.builder_config.estimate_f0_and_loudness: f0_and_ld_shape = (_F0_AND_LOUDNESS_RATE * _NUM_SECS + 1,) features["f0"] = { "hz": tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32), "midi": tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32), "confidence": tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32) } features["loudness"] = { "db": tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32) } return tfds.core.DatasetInfo( builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict(features), homepage="https://g.co/magenta/nsynth-dataset", citation=_CITATION, metadata=tfds.core.BeamMetadataDict(), ) def _split_generators(self, dl_manager): """Returns splits.""" dl_urls = {} dl_urls["examples"] = { split: _BASE_DOWNLOAD_PATH + "%s.tfrecord.tar" % split for split in _SPLITS } dl_urls["instrument_labels"] = ( _BASE_DOWNLOAD_PATH + "instrument_labels.txt") if self.builder_config.gansynth_subset: dl_urls["gansynth_splits"] = ( _BASE_DOWNLOAD_PATH + "gansynth_splits.csv") dl_paths = dl_manager.download_and_extract(dl_urls) with tf.io.gfile.GFile(dl_paths["instrument_labels"]) as f: instrument_labels = f.read().strip().splitlines() self.info.features["instrument"]["label"].names = instrument_labels split_ids = {s: set() for s in _SPLITS} split_dirs = {s: [dl_paths["examples"][s]] for s in _SPLITS} if self.builder_config.gansynth_subset: # Generator needs to see all original splits for each new split. 
split_dirs = {s: dl_paths["examples"].values() for s in _SPLITS} with tf.io.gfile.GFile(dl_paths["gansynth_splits"]) as f: reader = csv.DictReader(f) for row in reader: split_ids[row["split"]].add(row["id"]) return [ tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension name=split, num_shards=_SPLIT_SHARDS[split], gen_kwargs={ "tfrecord_dirs": split_dirs[split], "ids": split_ids[split], "split": split, }) for split in _SPLITS ] def _build_pcollection(self, pipeline, tfrecord_dirs, ids, split): """Build PCollection of examples for split.""" beam = tfds.core.lazy_imports.apache_beam def _emit_base_example(ex): """Maps an input example to a TFDS example.""" beam.metrics.Metrics.counter(split, "base-examples").inc() features = ex.features.feature return { "id": features["note_str"].bytes_list.value[0], "audio": np.array(features["audio"].float_list.value, dtype=np.float32), "pitch": features["pitch"].int64_list.value[0], "velocity": features["velocity"].int64_list.value[0], "instrument": { "label": tf.compat.as_text( features["instrument_str"].bytes_list.value[0]), "family": tf.compat.as_text( features["instrument_family_str"].bytes_list.value[0]), "source": tf.compat.as_text( features["instrument_source_str"].bytes_list.value[0]) }, "qualities": { q: features["qualities"].int64_list.value[i] for (i, q) in enumerate(_QUALITIES) } } def _in_split(ex, split_ids): if not split_ids or tf.compat.as_text(ex["id"]) in split_ids: beam.metrics.Metrics.counter(split, "in-split").inc() return True return False def _estimate_f0(ex): """Estimate the fundamental frequency using CREPE and add to example.""" ex = ex.copy() beam.metrics.Metrics.counter(split, "estimate-f0").inc() _, f0_hz, f0_confidence, _ = tfds.core.lazy_imports.crepe.predict( ex["audio"], sr=_AUDIO_RATE, viterbi=True, step_size=1000 / _F0_AND_LOUDNESS_RATE, verbose=0) f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz) # Set -infs introduced by hz_to_midi to 0. f0_midi[f0_midi == -np.inf] = 0 # Set nans to 0 in confidence. f0_confidence = np.nan_to_num(f0_confidence) ex["f0"] = { "hz": f0_hz.astype(np.float32), "midi": f0_midi.astype(np.float32), "confidence": f0_confidence.astype(np.float32), } return ex def _compute_loudness(ex): """Compute loudness and add to example.""" ex = ex.copy() beam.metrics.Metrics.counter(split, "compute-loudness").inc() librosa = tfds.core.lazy_imports.librosa n_fft = 2048 amin = 1e-15 top_db = 200.0 stft = librosa.stft( ex["audio"], n_fft=n_fft, hop_length=int(_AUDIO_RATE // _F0_AND_LOUDNESS_RATE)) loudness_db = librosa.perceptual_weighting( np.abs(stft)**2, librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft), amin=amin, top_db=top_db) # Average across freq in linear scale. mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0) mean_loudness_db = librosa.amplitude_to_db( mean_loudness_amp, amin=amin, top_db=top_db) ex["loudness"] = {"db": mean_loudness_db.astype(np.float32)} return ex examples = ( pipeline | beam.Create([os.path.join(dir_, "*") for dir_ in tfrecord_dirs]) | beam.io.tfrecordio.ReadAllFromTFRecord( coder=beam.coders.ProtoCoder(tf.train.Example)) | beam.Map(_emit_base_example) | beam.Filter(_in_split, split_ids=ids)) if self.builder_config.estimate_f0_and_loudness: examples = ( examples | beam.Reshuffle() | beam.Map(_estimate_f0) | beam.Map(_compute_loudness)) if split == tfds.Split.TRAIN: # Output mean and variance of loudness for TRAIN split. 
loudness = examples | beam.Map(lambda x: np.mean(x["loudness"]["db"])) loudness_mean = ( loudness | "loudness_mean" >> beam.combiners.Mean.Globally()) loudness_variance = ( loudness | beam.Map(lambda ld, ld_mean: (ld - ld_mean)**2, ld_mean=beam.pvalue.AsSingleton(loudness_mean)) | "loudness_variance" >> beam.combiners.Mean.Globally()) self.info.metadata["loudness_db_mean"] = loudness_mean self.info.metadata["loudness_db_variance"] = loudness_variance return examples
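A consumer-side sketch using the standard TFDS API; the config is selected by the name built above. Note this is a Beam-based builder, so the dataset normally has to be generated with a Beam runner before load() succeeds:

import tensorflow_datasets as tfds

ds = tfds.load("nsynth/gansynth_subset", split="train")
for ex in ds.take(1):
    # audio shape is (_AUDIO_RATE * _NUM_SECS,) == (64000,): 4 s at 16 kHz
    print(ex["pitch"], ex["audio"].shape)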
validate_linux_host_name
Validates a string as a legal host name component. This validation will also occur server-side in the ARM API, but that may take a minute or two before the user sees it. So it's more user-friendly to validate in the CLI pre-flight.
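A minimal sketch of the pre-flight check this docstring describes, exercising the RFC 1123 pattern from the implementation below:

import re

RFC1123 = re.compile(
    r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])'
    r'(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$')

assert RFC1123.match('aks-cluster-1')        # letters, digits, inner dashes: ok
assert not RFC1123.match('my_cluster')       # underscores are rejected
assert not RFC1123.match('-leading-dash')    # must start and end alphanumeric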
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import os import os.path import re from math import ceil from ipaddress import ip_network from knack.log import get_logger from azure.cli.core.util import CLIError import azure.cli.core.keys as keys logger = get_logger(__name__) def validate_ssh_key(namespace): if hasattr(namespace, 'no_ssh_key') and namespace.no_ssh_key: return string_or_file = (namespace.ssh_key_value or os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')) content = string_or_file if os.path.exists(string_or_file): logger.info('Use existing SSH public key file: %s', string_or_file) with open(string_or_file, 'r') as f: content = f.read() elif not keys.is_valid_ssh_rsa_public_key(content): if namespace.generate_ssh_keys: # figure out appropriate file names: # 'base_name'(with private keys), and 'base_name.pub'(with public keys) public_key_filepath = string_or_file if public_key_filepath[-4:].lower() == '.pub': private_key_filepath = public_key_filepath[:-4] else: private_key_filepath = public_key_filepath + '.private' content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath) logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to " "allow SSH access to the VM. If using machines without " "permanent storage like Azure Cloud Shell without an attached " "file share, back up your keys to a safe location", private_key_filepath, public_key_filepath) else: raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. ' 'You can use --generate-ssh-keys to let CLI generate one for you') namespace.ssh_key_value = content def validate_create_parameters(namespace): if not namespace.name: raise CLIError('--name has no value') if namespace.dns_name_prefix is not None and not namespace.dns_name_prefix: raise CLIError('--dns-prefix has no value') def validate_k8s_version(namespace): """Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server to use its default version.""" if namespace.kubernetes_version: k8s_release_regex = re.compile(r'^[v|V]?(\d+\.\d+\.\d+.*)$') found = k8s_release_regex.findall(namespace.kubernetes_version) if found: namespace.kubernetes_version = found[0] else: raise CLIError('--kubernetes-version should be the full version number, ' 'such as "1.7.12" or "1.8.7"') # MASKED: validate_linux_host_name function (lines 71-83) def validate_max_pods(namespace): """Validates that max_pods is set to a reasonable minimum number.""" # kube-proxy and kube-svc reside on each node, # 2 kube-proxy pods, 1 azureproxy/heapster/dashboard/tunnelfront are in kube-system minimum_pods_required = ceil((namespace.node_count * 2 + 6 + 1) / namespace.node_count) if namespace.max_pods != 0 and namespace.max_pods < minimum_pods_required: raise CLIError('--max-pods must be at least {} for a managed Kubernetes cluster to function.'
.format(minimum_pods_required)) def validate_nodes_count(namespace): """Validate that min_count and max_count are set to 1-100""" if namespace.min_count is not None: if namespace.min_count < 1 or namespace.min_count > 100: raise CLIError('--min-count must be in the range [1,100]') if namespace.max_count is not None: if namespace.max_count < 1 or namespace.max_count > 100: raise CLIError('--max-count must be in the range [1,100]') def validate_ip_ranges(namespace): if namespace.api_server_authorized_ip_ranges is not None: if namespace.api_server_authorized_ip_ranges == '': return for ip in namespace.api_server_authorized_ip_ranges.split(','): try: ip_network(ip) except ValueError: raise CLIError("--api-server-authorized-ip-ranges should be a list of IPv4 addresses or CIDRs") def validate_nodepool_name(namespace): """Validates a nodepool name to be at most 12 characters, alphanumeric only.""" if namespace.nodepool_name != "": if len(namespace.nodepool_name) > 12: raise CLIError('--nodepool-name can contain at most 12 characters') if not namespace.nodepool_name.isalnum(): raise CLIError('--nodepool-name should only contain alphanumeric characters') def validate_vm_set_type(namespace): """Validates the vm set type string.""" if namespace.vm_set_type is not None: if namespace.vm_set_type == '': return if namespace.vm_set_type.lower() != "availabilityset" and \ namespace.vm_set_type.lower() != "virtualmachinescalesets": raise CLIError("--vm-set-type can only be VirtualMachineScaleSets or AvailabilitySet") def validate_load_balancer_sku(namespace): """Validates the load balancer sku string.""" if namespace.load_balancer_sku is not None: if namespace.load_balancer_sku == '': return if namespace.load_balancer_sku.lower() != "basic" and namespace.load_balancer_sku.lower() != "standard": raise CLIError("--load-balancer-sku can only be standard or basic") def validate_load_balancer_outbound_ips(namespace): """validate load balancer profile outbound IP ids""" if namespace.load_balancer_outbound_ips is not None: ip_id_list = [x.strip() for x in namespace.load_balancer_outbound_ips.split(',')] if not all(ip_id_list): raise CLIError("--load-balancer-outbound-ips cannot contain whitespace") def validate_load_balancer_outbound_ip_prefixes(namespace): """validate load balancer profile outbound IP prefix ids""" if namespace.load_balancer_outbound_ip_prefixes is not None: ip_prefix_id_list = [x.strip() for x in namespace.load_balancer_outbound_ip_prefixes.split(',')] if not all(ip_prefix_id_list): raise CLIError("--load-balancer-outbound-ip-prefixes cannot contain whitespace") def validate_taints(namespace): """Validates that provided taint is a valid format""" regex = re.compile(r"^[a-zA-Z\d][\w\-\.\/]{0,252}=[a-zA-Z\d][\w\-\.]{0,62}:(NoSchedule|PreferNoSchedule|NoExecute)$") # pylint: disable=line-too-long if namespace.node_taints is not None and namespace.node_taints != '': for taint in namespace.node_taints.split(','): if taint == "": continue found = regex.findall(taint) if not found: raise CLIError('Invalid node taint: %s' % taint) def validate_priority(namespace): """Validates the node pool priority string.""" if namespace.priority is not None: if namespace.priority == '': return if namespace.priority != "Low" and \ namespace.priority != "Regular": raise CLIError("--priority can only be Low or Regular") def validate_eviction_policy(namespace): """Validates the node pool eviction policy string.""" if namespace.eviction_policy is not None: if namespace.eviction_policy == '': return if namespace.eviction_policy !=
"Delete" and \ namespace.eviction_policy != "Deallocate": raise CLIError("--eviction-policy can only be Delete or Deallocate")
def validate_linux_host_name(namespace): """Validates a string as a legal host name component. This validation will also occur server-side in the ARM API, but that may take a minute or two before the user sees it. So it's more user-friendly to validate in the CLI pre-flight. """ # https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$') # pylint:disable=line-too-long found = rfc1123_regex.findall(namespace.name) if not found: raise CLIError('--name cannot exceed 63 characters and can only contain ' 'letters, numbers, or dashes (-).')
71
83
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import os import os.path import re from math import ceil from ipaddress import ip_network from knack.log import get_logger from azure.cli.core.util import CLIError import azure.cli.core.keys as keys logger = get_logger(__name__) def validate_ssh_key(namespace): if hasattr(namespace, 'no_ssh_key') and namespace.no_ssh_key: return string_or_file = (namespace.ssh_key_value or os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')) content = string_or_file if os.path.exists(string_or_file): logger.info('Use existing SSH public key file: %s', string_or_file) with open(string_or_file, 'r') as f: content = f.read() elif not keys.is_valid_ssh_rsa_public_key(content): if namespace.generate_ssh_keys: # figure out appropriate file names: # 'base_name'(with private keys), and 'base_name.pub'(with public keys) public_key_filepath = string_or_file if public_key_filepath[-4:].lower() == '.pub': private_key_filepath = public_key_filepath[:-4] else: private_key_filepath = public_key_filepath + '.private' content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath) logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to " "allow SSH access to the VM. If using machines without " "permanent storage like Azure Cloud Shell without an attached " "file share, back up your keys to a safe location", private_key_filepath, public_key_filepath) else: raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. ' 'You can use --generate-ssh-keys to let CLI generate one for you') namespace.ssh_key_value = content def validate_create_parameters(namespace): if not namespace.name: raise CLIError('--name has no value') if namespace.dns_name_prefix is not None and not namespace.dns_name_prefix: raise CLIError('--dns-prefix has no value') def validate_k8s_version(namespace): """Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server to use its default version.""" if namespace.kubernetes_version: k8s_release_regex = re.compile(r'^[v|V]?(\d+\.\d+\.\d+.*)$') found = k8s_release_regex.findall(namespace.kubernetes_version) if found: namespace.kubernetes_version = found[0] else: raise CLIError('--kubernetes-version should be the full version number, ' 'such as "1.7.12" or "1.8.7"') def validate_linux_host_name(namespace): """Validates a string as a legal host name component. This validation will also occur server-side in the ARM API, but that may take a minute or two before the user sees it. So it's more user-friendly to validate in the CLI pre-flight. 
""" # https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$') # pylint:disable=line-too-long found = rfc1123_regex.findall(namespace.name) if not found: raise CLIError('--name cannot exceed 63 characters and can only contain ' 'letters, numbers, or dashes (-).') def validate_max_pods(namespace): """Validates that max_pods is set to a reasonable minimum number.""" # kube-proxy and kube-svc reside each nodes, # 2 kube-proxy pods, 1 azureproxy/heapster/dashboard/tunnelfront are in kube-system minimum_pods_required = ceil((namespace.node_count * 2 + 6 + 1) / namespace.node_count) if namespace.max_pods != 0 and namespace.max_pods < minimum_pods_required: raise CLIError('--max-pods must be at least {} for a managed Kubernetes cluster to function.' .format(minimum_pods_required)) def validate_nodes_count(namespace): """Validate that min_count and max_count is set to 1-100""" if namespace.min_count is not None: if namespace.min_count < 1 or namespace.min_count > 100: raise CLIError('--min-count must be in the range [1,100]') if namespace.max_count is not None: if namespace.max_count < 1 or namespace.max_count > 100: raise CLIError('--max-count must be in the range [1,100]') def validate_ip_ranges(namespace): if namespace.api_server_authorized_ip_ranges is not None: if namespace.api_server_authorized_ip_ranges == '': return for ip in namespace.api_server_authorized_ip_ranges.split(','): try: ip_network(ip) except ValueError: raise CLIError("--api-server-authorized-ip-ranges should be list of IPv4 addresses or CIDRs") def validate_nodepool_name(namespace): """Validates a nodepool name to be at most 12 characters, alphanumeric only.""" if namespace.nodepool_name != "": if len(namespace.nodepool_name) > 12: raise CLIError('--nodepool-name can contain atmost 12 characters') if not namespace.nodepool_name.isalnum(): raise CLIError('--nodepool-name should only contain alphanumeric characters') def validate_vm_set_type(namespace): """Validates the vm set type string.""" if namespace.vm_set_type is not None: if namespace.vm_set_type == '': return if namespace.vm_set_type.lower() != "availabilityset" and \ namespace.vm_set_type.lower() != "virtualmachinescalesets": raise CLIError("--vm-set-type can only be VirtualMachineScaleSets or AvailabilitySet") def validate_load_balancer_sku(namespace): """Validates the load balancer sku string.""" if namespace.load_balancer_sku is not None: if namespace.load_balancer_sku == '': return if namespace.load_balancer_sku.lower() != "basic" and namespace.load_balancer_sku.lower() != "standard": raise CLIError("--load-balancer-sku can only be standard or basic") def validate_load_balancer_outbound_ips(namespace): """validate load balancer profile outbound IP ids""" if namespace.load_balancer_outbound_ips is not None: ip_id_list = [x.strip() for x in namespace.load_balancer_outbound_ips.split(',')] if not all(ip_id_list): raise CLIError("--load-balancer-outbound-ips cannot contain whitespace") def validate_load_balancer_outbound_ip_prefixes(namespace): """validate load balancer profile outbound IP prefix ids""" if namespace.load_balancer_outbound_ip_prefixes is not None: ip_prefix_id_list = [x.strip() for x in namespace.load_balancer_outbound_ip_prefixes.split(',')] if not all(ip_prefix_id_list): raise CLIError("--load-balancer-outbound-ip-prefixes cannot contain whitespace") def 
validate_taints(namespace): """Validates that provided taint is a valid format""" regex = re.compile(r"^[a-zA-Z\d][\w\-\.\/]{0,252}=[a-zA-Z\d][\w\-\.]{0,62}:(NoSchedule|PreferNoSchedule|NoExecute)$") # pylint: disable=line-too-long if namespace.node_taints is not None and namespace.node_taints != '': for taint in namespace.node_taints.split(','): if taint == "": continue found = regex.findall(taint) if not found: raise CLIError('Invalid node taint: %s' % taint) def validate_priority(namespace): """Validates the node pool priority string.""" if namespace.priority is not None: if namespace.priority == '': return if namespace.priority != "Low" and \ namespace.priority != "Regular": raise CLIError("--priority can only be Low or Regular") def validate_eviction_policy(namespace): """Validates the node pool eviction policy string.""" if namespace.eviction_policy is not None: if namespace.eviction_policy == '': return if namespace.eviction_policy != "Delete" and \ namespace.eviction_policy != "Deallocate": raise CLIError("--eviction-policy can only be Delete or Deallocate")
__init__
Provides a proxy protocol policy, which allows an ELB to carry client connection information to a backend. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[list] instance_ports: List of instance ports to which the policy should be applied. This can be specified if the protocol is SSL or TCP. :param pulumi.Input[str] load_balancer: The load balancer to which the policy should be attached.
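A usage sketch, assuming an aws.elb.LoadBalancer named lb exists elsewhere in the program; the resource type string in the implementation below ('aws:ec2/proxyProtocolPolicy:ProxyProtocolPolicy') corresponds to aws.ec2.ProxyProtocolPolicy in the Python SDK:

import pulumi_aws as aws

# `lb` is assumed to be an aws.elb.LoadBalancer defined elsewhere
smtp = aws.ec2.ProxyProtocolPolicy('smtp',
                                   load_balancer=lb.name,
                                   instance_ports=['25', '587'])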
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import json import warnings import pulumi import pulumi.runtime from .. import utilities, tables class ProxyProtocolPolicy(pulumi.CustomResource): instance_ports: pulumi.Output[list] """ List of instance ports to which the policy should be applied. This can be specified if the protocol is SSL or TCP. """ load_balancer: pulumi.Output[str] """ The load balancer to which the policy should be attached. """ # MASKED: __init__ function (lines 22-60) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
def __init__(__self__, resource_name, opts=None, instance_ports=None, load_balancer=None, __name__=None, __opts__=None): """ Provides a proxy protocol policy, which allows an ELB to carry client connection information to a backend. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[list] instance_ports: List of instance ports to which the policy should be applied. This can be specified if the protocol is SSL or TCP. :param pulumi.Input[str] load_balancer: The load balancer to which the policy should be attached. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if not resource_name: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource name to be a string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if instance_ports is None: raise TypeError('Missing required property instance_ports') __props__['instance_ports'] = instance_ports if load_balancer is None: raise TypeError('Missing required property load_balancer') __props__['load_balancer'] = load_balancer super(ProxyProtocolPolicy, __self__).__init__( 'aws:ec2/proxyProtocolPolicy:ProxyProtocolPolicy', resource_name, __props__, opts)
22
60
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import json import warnings import pulumi import pulumi.runtime from .. import utilities, tables class ProxyProtocolPolicy(pulumi.CustomResource): instance_ports: pulumi.Output[list] """ List of instance ports to which the policy should be applied. This can be specified if the protocol is SSL or TCP. """ load_balancer: pulumi.Output[str] """ The load balancer to which the policy should be attached. """ def __init__(__self__, resource_name, opts=None, instance_ports=None, load_balancer=None, __name__=None, __opts__=None): """ Provides a proxy protocol policy, which allows an ELB to carry client connection information to a backend. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[list] instance_ports: List of instance ports to which the policy should be applied. This can be specified if the protocol is SSL or TCP. :param pulumi.Input[str] load_balancer: The load balancer to which the policy should be attached. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if not resource_name: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource name to be a string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if instance_ports is None: raise TypeError('Missing required property instance_ports') __props__['instance_ports'] = instance_ports if load_balancer is None: raise TypeError('Missing required property load_balancer') __props__['load_balancer'] = load_balancer super(ProxyProtocolPolicy, __self__).__init__( 'aws:ec2/proxyProtocolPolicy:ProxyProtocolPolicy', resource_name, __props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
transform_audio
Add background noise audio. Note that this is an in-place transformation. :param audio_segment: Audio segment to add effects to. :type audio_segment: AudioSegment|SpeechSegment
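Conceptually, mixing at a target SNR means scaling the noise so that the signal-to-noise power ratio in dB hits the sampled value. A standalone numpy sketch of that arithmetic (an illustration of the idea, not the library's add_noise implementation; signal and noise are assumed to be equal-length float arrays):

import numpy as np

def mix_at_snr(signal, noise, snr_db):
    # choose a gain g so that 10*log10(P_signal / (g**2 * P_noise)) == snr_db
    p_signal = np.mean(signal ** 2)
    p_noise = np.mean(noise ** 2)
    gain = np.sqrt(p_signal / (p_noise * 10.0 ** (snr_db / 10.0)))
    return signal + gain * noise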
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains the noise perturb augmentation model.""" from deepspeech.frontend.audio import AudioSegment from deepspeech.frontend.augmentor.base import AugmentorBase from deepspeech.frontend.utility import read_manifest class NoisePerturbAugmentor(AugmentorBase): """Augmentation model for adding background noise. :param rng: Random generator object. :type rng: random.Random :param min_snr_dB: Minimal signal noise ratio, in decibels. :type min_snr_dB: float :param max_snr_dB: Maximal signal noise ratio, in decibels. :type max_snr_dB: float :param noise_manifest_path: Manifest path for noise audio data. :type noise_manifest_path: str """ def __init__(self, rng, min_snr_dB, max_snr_dB, noise_manifest_path): self._min_snr_dB = min_snr_dB self._max_snr_dB = max_snr_dB self._rng = rng self._noise_manifest = read_manifest(manifest_path=noise_manifest_path) def __call__(self, x, uttid=None, train=True): if not train: return x self.transform_audio(x) return x # MASKED: transform_audio function (lines 45-64)
def transform_audio(self, audio_segment): """Add background noise audio. Note that this is an in-place transformation. :param audio_segment: Audio segment to add effects to. :type audio_segment: AudioSegment|SpeechSegment """ noise_json = self._rng.choice(self._noise_manifest, 1, replace=False)[0] if noise_json['duration'] < audio_segment.duration: raise RuntimeError("The duration of sampled noise audio is smaller " "than the audio segment to add effects to.") diff_duration = noise_json['duration'] - audio_segment.duration start = self._rng.uniform(0, diff_duration) end = start + audio_segment.duration noise_segment = AudioSegment.slice_from_file( noise_json['audio_filepath'], start=start, end=end) snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB) audio_segment.add_noise( noise_segment, snr_dB, allow_downsampling=True, rng=self._rng)
45
64
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains the noise perturb augmentation model.""" from deepspeech.frontend.audio import AudioSegment from deepspeech.frontend.augmentor.base import AugmentorBase from deepspeech.frontend.utility import read_manifest class NoisePerturbAugmentor(AugmentorBase): """Augmentation model for adding background noise. :param rng: Random generator object. :type rng: random.Random :param min_snr_dB: Minimal signal noise ratio, in decibels. :type min_snr_dB: float :param max_snr_dB: Maximal signal noise ratio, in decibels. :type max_snr_dB: float :param noise_manifest_path: Manifest path for noise audio data. :type noise_manifest_path: str """ def __init__(self, rng, min_snr_dB, max_snr_dB, noise_manifest_path): self._min_snr_dB = min_snr_dB self._max_snr_dB = max_snr_dB self._rng = rng self._noise_manifest = read_manifest(manifest_path=noise_manifest_path) def __call__(self, x, uttid=None, train=True): if not train: return x self.transform_audio(x) return x def transform_audio(self, audio_segment): """Add background noise audio. Note that this is an in-place transformation. :param audio_segment: Audio segment to add effects to. :type audio_segment: AudioSegment|SpeechSegment """ noise_json = self._rng.choice(self._noise_manifest, 1, replace=False)[0] if noise_json['duration'] < audio_segment.duration: raise RuntimeError("The duration of sampled noise audio is smaller " "than the audio segment to add effects to.") diff_duration = noise_json['duration'] - audio_segment.duration start = self._rng.uniform(0, diff_duration) end = start + audio_segment.duration noise_segment = AudioSegment.slice_from_file( noise_json['audio_filepath'], start=start, end=end) snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB) audio_segment.add_noise( noise_segment, snr_dB, allow_downsampling=True, rng=self._rng)
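A construction sketch. Note that self._rng.choice(..., replace=False) is a numpy signature, so despite the ":type rng: random.Random" in the class docstring, a numpy RandomState (or Generator) appears to be what the code actually requires; the manifest path is a placeholder:

import numpy as np

aug = NoisePerturbAugmentor(rng=np.random.RandomState(0),
                            min_snr_dB=10,
                            max_snr_dB=50,
                            noise_manifest_path='noise_manifest.json')  # placeholder path
# speech_segment: an existing AudioSegment/SpeechSegment loaded elsewhere
augmented = aug(speech_segment)  # mixes noise in place during training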
symrcm
Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric, this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default). It is assumed by default (*sym=False*) that the input matrix is not symmetric. This is because it is faster to do A+Trans(A) than it is to check for symmetry for a generic matrix. If you are guaranteed that the matrix is symmetric in structure (values of matrix element do not matter) then set *sym=True* Parameters ---------- A : csr_matrix, qobj Input sparse csr_matrix or Qobj. sym : bool {False, True} Flag to set whether input matrix is symmetric. Returns ------- perm : array Array of permuted row and column indices. Notes ----- This routine is used primarily for internal reordering of Lindblad super-operators for use in iterative solver routines. References ---------- E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric Matrices", ACM '69 Proceedings of the 1969 24th national conference, (1969).
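A small sketch of the reordering on a pattern with two interleaved components; since this example's structure is symmetric, sym=True skips the A+Trans(A) step:

import numpy as np
import scipy.sparse as sp

A = sp.csr_matrix(np.array([[1, 0, 0, 1],
                            [0, 1, 1, 0],
                            [0, 1, 1, 0],
                            [1, 0, 0, 1]]))
perm = symrcm(A, sym=True)   # orders each connected component contiguously
B = A[perm, :][:, perm]      # symmetric permutation; bandwidth drops to 1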
# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ This module contains a collection of graph theory routines used mainly to reorder matrices for iterative steady state solvers. """ import numpy as np import scipy.sparse as sp from qutip.cy.graph_utils import ( _pseudo_peripheral_node, _breadth_first_search, _node_degrees, _rcm, _bfs_matching, _weighted_bfs_matching) from qutip.settings import debug from warnings import warn if debug: import inspect def graph_degree(A): """ Returns the degree for the nodes (rows) of a symmetric graph in sparse CSR or CSC format, or a qobj. Parameters ---------- A : qobj, csr_matrix, csc_matrix Input quantum object or csr_matrix. Returns ------- degree : array Array of integers giving the degree for each node (row). """ if A.__class__.__name__=='Qobj': return _node_degrees(A.data.indices, A.data.indptr, A.shape[0]) else: return _node_degrees(A.indices, A.indptr, A.shape[0]) def breadth_first_search(A,start): """ Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs. This function requires a matrix with symmetric structure. Use A+trans(A) if the original matrix is not symmetric or you are not sure. Parameters ---------- A : qobj, csr_matrix Input graph in CSR matrix form start : int Starting node for BFS traversal. Returns ------- order : array Order in which nodes are traversed from starting node. levels : array Level of the nodes in the order that they are traversed.
""" if A.__class__.__name__=='Qobj': A=A.data num_rows=A.shape[0] start=int(start) order, levels = _breadth_first_search(A.indices,A.indptr, num_rows, start) #since maybe not all nodes are in search, check for unused entires in arrays return order[order!=-1], levels[levels!=-1] # MASKED: symrcm function (lines 104-149) def bfs_matching(A): """ Returns an array of row permutations that removes nonzero elements from the diagonal of a nonsingular square CSC sparse matrix. Such a permutation is always possible provided that the matrix is nonsingular. This function looks at the structure of the matrix only. Parameters ---------- A : csc_matrix Input matrix Returns ------- perm : array Array of row permutations. Notes ----- This function relies on a maximum cardinality bipartite matching algorithm based on a breadth-first search (BFS) of the underlying graph[1]_. References ---------- .. [1] I. S. Duff, K. Kaya, and B. Ucar, "Design, Implementation, and Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw. 38, no. 2, (2011). """ nrows = A.shape[0] if A.shape[0]!=A.shape[1]: raise ValueError('bfs_matching requires a square matrix.') if A.__class__.__name__=='Qobj': A = A.data.tocsc() elif not sp.isspmatrix_csc(A): A = sp.csc_matrix(A) warn('bfs_matching requires CSC matrix format.', sp.SparseEfficiencyWarning) perm = _bfs_matching(A.indices, A.indptr, nrows) if np.any(perm==-1): raise Exception('Possibly singular input matrix.') return perm def weighted_bfs_matching(A): """ Returns an array of row permutations that attempts to maximize the product of the ABS values of the diagonal elements in a nonsingular square CSC sparse matrix. Such a permutation is always possible provided that the matrix is nonsingular. This function looks at both the structure and ABS values of the underlying matrix. Parameters ---------- A : csc_matrix Input matrix Returns ------- perm : array Array of row permutations. Notes ----- This function uses a weighted maximum cardinality bipartite matching algorithm based on breadth-first search (BFS). The columns are weighted according to the element of max ABS value in the associated rows and are traversed in descending order by weight. When performing the BFS traversal, the row associated to a given column is the one with maximum weight. Unlike other techniques[1]_, this algorithm does not guarantee the product of the diagonal is maximized. However, this limitation is offset by the substantially faster runtime of this method. References ---------- .. [1] I. S. Duff and J. Koster, "The design and use of algorithms for permuting large entries to the diagonal of sparse matrices", SIAM J. Matrix Anal. and Applics. 20, no. 4, 889 (1997). """ nrows = A.shape[0] if A.shape[0]!=A.shape[1]: raise ValueError('weighted_bfs_matching requires a square matrix.') if A.__class__.__name__=='Qobj': A = A.data.tocsc() elif not sp.isspmatrix_csc(A): A = sp.csc_matrix(A) warn('weighted_bfs_matching requires CSC matrix format', sp.SparseEfficiencyWarning) perm = _weighted_bfs_matching( np.asarray(np.abs(A.data), dtype=float), A.indices, A.indptr, nrows) if np.any(perm==-1): raise Exception('Possibly singular input matrix.') return perm
def symrcm(A, sym=False): """ Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric, this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default). It is assumed by default (*sym=False*) that the input matrix is not symmetric. This is because it is faster to do A+Trans(A) than it is to check for symmetry for a generic matrix. If you are guaranteed that the matrix is symmetric in structure (values of matrix element do not matter) then set *sym=True* Parameters ---------- A : csr_matrix, qobj Input sparse csr_matrix or Qobj. sym : bool {False, True} Flag to set whether input matrix is symmetric. Returns ------- perm : array Array of permuted row and column indices. Notes ----- This routine is used primarily for internal reordering of Lindblad super-operators for use in iterative solver routines. References ---------- E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric Matrices", ACM '69 Proceedings of the 1969 24th national conference, (1969). """ nrows = A.shape[0] if A.__class__.__name__=='Qobj': if not sym: A = A.data+A.data.transpose() return _rcm(A.indices, A.indptr, nrows) else: return _rcm(A.data.indices, A.data.indptr, nrows) else: if not sym: A=A+A.transpose() return _rcm(A.indices, A.indptr, nrows)
104
149
# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ This module contains a collection of graph theory routines used mainly to reorder matrices for iterative steady state solvers. """ import numpy as np import scipy.sparse as sp from qutip.cy.graph_utils import ( _pseudo_peripheral_node, _breadth_first_search, _node_degrees, _rcm, _bfs_matching, _weighted_bfs_matching) from qutip.settings import debug from warnings import warn if debug: import inspect def graph_degree(A): """ Returns the degree for the nodes (rows) of a symmetric graph in sparse CSR or CSC format, or a qobj. Parameters ---------- A : qobj, csr_matrix, csc_matrix Input quantum object or csr_matrix. Returns ------- degree : array Array of integers giving the degree for each node (row). """ if A.__class__.__name__=='Qobj': return _node_degrees(A.data.indices, A.data.indptr, A.shape[0]) else: return _node_degrees(A.indices, A.indptr, A.shape[0]) def breadth_first_search(A,start): """ Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs. This function requires a matrix with symmetric structure. Use A+trans(A) if the original matrix is not symmetric or you are not sure. Parameters ---------- A : qobj, csr_matrix Input graph in CSR matrix form start : int Starting node for BFS traversal. Returns ------- order : array Order in which nodes are traversed from starting node. levels : array Level of the nodes in the order that they are traversed.
""" if A.__class__.__name__=='Qobj': A=A.data num_rows=A.shape[0] start=int(start) order, levels = _breadth_first_search(A.indices,A.indptr, num_rows, start) #since maybe not all nodes are in search, check for unused entires in arrays return order[order!=-1], levels[levels!=-1] def symrcm(A, sym=False): """ Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric, this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default). It is assumed by default (*sym=False*) that the input matrix is not symmetric. This is because it is faster to do A+Trans(A) than it is to check for symmetry for a generic matrix. If you are guaranteed that the matrix is symmetric in structure (values of matrix element do not matter) then set *sym=True* Parameters ---------- A : csr_matrix, qobj Input sparse csr_matrix or Qobj. sym : bool {False, True} Flag to set whether input matrix is symmetric. Returns ------- perm : array Array of permuted row and column indices. Notes ----- This routine is used primarily for internal reordering of Lindblad super-operators for use in iterative solver routines. References ---------- E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric Matrices", ACM '69 Proceedings of the 1969 24th national conference, (1969). """ nrows = A.shape[0] if A.__class__.__name__=='Qobj': if not sym: A = A.data+A.data.transpose() return _rcm(A.indices, A.indptr, nrows) else: return _rcm(A.data.indices, A.data.indptr, nrows) else: if not sym: A=A+A.transpose() return _rcm(A.indices, A.indptr, nrows) def bfs_matching(A): """ Returns an array of row permutations that removes nonzero elements from the diagonal of a nonsingular square CSC sparse matrix. Such a permutation is always possible provided that the matrix is nonsingular. This function looks at the structure of the matrix only. Parameters ---------- A : csc_matrix Input matrix Returns ------- perm : array Array of row permutations. Notes ----- This function relies on a maximum cardinality bipartite matching algorithm based on a breadth-first search (BFS) of the underlying graph[1]_. References ---------- .. [1] I. S. Duff, K. Kaya, and B. Ucar, "Design, Implementation, and Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw. 38, no. 2, (2011). """ nrows = A.shape[0] if A.shape[0]!=A.shape[1]: raise ValueError('bfs_matching requires a square matrix.') if A.__class__.__name__=='Qobj': A = A.data.tocsc() elif not sp.isspmatrix_csc(A): A = sp.csc_matrix(A) warn('bfs_matching requires CSC matrix format.', sp.SparseEfficiencyWarning) perm = _bfs_matching(A.indices, A.indptr, nrows) if np.any(perm==-1): raise Exception('Possibly singular input matrix.') return perm def weighted_bfs_matching(A): """ Returns an array of row permutations that attempts to maximize the product of the ABS values of the diagonal elements in a nonsingular square CSC sparse matrix. Such a permutation is always possible provided that the matrix is nonsingular. This function looks at both the structure and ABS values of the underlying matrix. Parameters ---------- A : csc_matrix Input matrix Returns ------- perm : array Array of row permutations. Notes ----- This function uses a weighted maximum cardinality bipartite matching algorithm based on breadth-first search (BFS). The columns are weighted according to the element of max ABS value in the associated rows and are traversed in descending order by weight. 
When performing the BFS traversal, the row associated to a given column is the one with maximum weight. Unlike other techniques[1]_, this algorithm does not guarantee the product of the diagonal is maximized. However, this limitation is offset by the substantially faster runtime of this method. References ---------- .. [1] I. S. Duff and J. Koster, "The design and use of algorithms for permuting large entries to the diagonal of sparse matrices", SIAM J. Matrix Anal. and Applics. 20, no. 4, 889 (1997). """ nrows = A.shape[0] if A.shape[0]!=A.shape[1]: raise ValueError('weighted_bfs_matching requires a square matrix.') if A.__class__.__name__=='Qobj': A = A.data.tocsc() elif not sp.isspmatrix_csc(A): A = sp.csc_matrix(A) warn('weighted_bfs_matching requires CSC matrix format', sp.SparseEfficiencyWarning) perm = _weighted_bfs_matching( np.asarray(np.abs(A.data), dtype=float), A.indices, A.indptr, nrows) if np.any(perm==-1): raise Exception('Possibly singular input matrix.') return perm
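For the two matching routines, a minimal usage sketch under the same qutip.graph import assumption; the matrix values are illustrative only. The returned array is a row permutation intended to make the diagonal structurally zero-free; whether it should be applied as A[perm, :] or through its inverse is a detail of the underlying Cython routine that I have not verified, so the sketch prints the resulting diagonal for inspection rather than asserting it.

import numpy as np
import scipy.sparse as sp
from qutip.graph import bfs_matching, weighted_bfs_matching  # module path assumed as above

# Nonsingular matrix whose natural diagonal contains a zero (illustrative values);
# both routines expect CSC format.
A = sp.csc_matrix(np.array([[0.0, 2.0, 0.0],
                            [1.0, 0.0, 0.0],
                            [0.0, 3.0, 4.0]]))

perm = weighted_bfs_matching(A)    # row permutation favouring large-magnitude diagonal entries
A_perm = A[perm, :]                # one of the two possible conventions; check the diagonal below
print(perm, A_perm.diagonal())

print(bfs_matching(A))             # structure-only variant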
inner
inner is called when a generated method (e.g. publicGetX) is invoked. _self is a reference to self created by function.__get__(exchange, type(exchange)); the resulting closure (https://en.wikipedia.org/wiki/Closure_(computer_programming)) is equivalent to functools.partial.
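The masked body itself is not reproduced here, but the docstring together with the partialer code further down (which captures path, api and method in outer_kwargs and binds the returned inner onto the class with setattr) pins down the pattern. Below is a self-contained, hedged sketch of that closure pattern, not the verbatim ccxt implementation; make_generated_method, DemoExchange and the params handling are hypothetical names and assumptions introduced for illustration.

import functools

def make_generated_method(entry, path, api_type, http_method):
    # Standalone illustration of the closure built by define_rest_api's partialer.
    outer_kwargs = {'path': path, 'api': api_type, 'method': http_method}

    @functools.wraps(entry)
    def inner(_self, params=None):
        # inner runs when the generated method (e.g. publicGetX) is called;
        # the captured outer_kwargs make this equivalent to functools.partial(entry, ...)
        inner_kwargs = dict(outer_kwargs)      # copy so repeated calls do not mutate the captured dict
        if params is not None:
            inner_kwargs['params'] = params    # forward user-supplied query/body params
        return entry(_self, **inner_kwargs)

    return inner

class DemoExchange:
    def request(self, path, api='public', method='GET', params={}):
        return (path, api, method, params)     # stand-in for Exchange.request / fetch2

# bind a generated method on the class, as define_rest_api does with setattr
DemoExchange.publicGetTicker = make_generated_method(DemoExchange.request, 'ticker', 'public', 'GET')

print(DemoExchange().publicGetTicker({'symbol': 'BTC/USD'}))
# -> ('ticker', 'public', 'GET', {'symbol': 'BTC/USD'})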
# -*- coding: utf-8 -*- """Base exchange class""" # ----------------------------------------------------------------------------- __version__ = '1.18.575' # ----------------------------------------------------------------------------- from ccxt.base.errors import ExchangeError from ccxt.base.errors import NetworkError from ccxt.base.errors import NotSupported from ccxt.base.errors import AuthenticationError from ccxt.base.errors import DDoSProtection from ccxt.base.errors import RequestTimeout from ccxt.base.errors import ExchangeNotAvailable from ccxt.base.errors import InvalidAddress # ----------------------------------------------------------------------------- from ccxt.base.decimal_to_precision import decimal_to_precision from ccxt.base.decimal_to_precision import DECIMAL_PLACES, TRUNCATE, ROUND from ccxt.base.decimal_to_precision import number_to_string # ----------------------------------------------------------------------------- __all__ = [ 'Exchange', ] # ----------------------------------------------------------------------------- # Python 2 & 3 import types import logging import base64 import calendar import collections import datetime from email.utils import parsedate import functools import gzip import hashlib import hmac import io import json import math from numbers import Number import re from requests import Session from requests.utils import default_user_agent from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException # import socket from ssl import SSLError # import sys import time import uuid import zlib from decimal import Decimal # ----------------------------------------------------------------------------- try: basestring # basestring was removed in Python 3 except NameError: basestring = str try: long # long integer was removed in Python 3 except NameError: long = int # ----------------------------------------------------------------------------- try: import urllib.parse as _urlencode # Python 3 except ImportError: import urllib as _urlencode # Python 2 # ----------------------------------------------------------------------------- # web3/0x imports try: # from web3.auto import w3 from web3 import Web3, HTTPProvider from web3.utils.encoding import hex_encode_abi_type except ImportError: Web3 = HTTPProvider = None # web3/0x not supported in Python 2 # ----------------------------------------------------------------------------- class Exchange(object): """Base exchange class""" id = None version = None certified = False # rate limiter settings enableRateLimit = False rateLimit = 2000 # milliseconds = seconds * 1000 timeout = 10000 # milliseconds = seconds * 1000 asyncio_loop = None aiohttp_proxy = None aiohttp_trust_env = False session = None # Session () by default logger = None # logging.getLogger(__name__) by default userAgent = None userAgents = { 'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36', 'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36', } verbose = False markets = None symbols = None fees = { 'trading': { 'percentage': True, # subclasses should rarely have to redefine this }, 'funding': { 'withdraw': {}, 'deposit': {}, }, } loaded_fees = { 'trading': { 'percentage': True, }, 'funding': { 'withdraw': {}, 'deposit': {}, }, } ids = None tickers = None api = None parseJsonResponse = True proxy = '' origin = '*' # CORS origin proxies = None hostname = None # in case of 
inaccessibility of the "main" domain apiKey = '' secret = '' password = '' uid = '' privateKey = '' # a "0x"-prefixed hexstring private key for a wallet walletAddress = '' # the wallet address "0x"-prefixed hexstring token = '' # reserved for HTTP auth in some cases twofa = None marketsById = None markets_by_id = None currencies_by_id = None precision = None exceptions = None limits = { 'amount': { 'min': None, 'max': None, }, 'price': { 'min': None, 'max': None, }, 'cost': { 'min': None, 'max': None, }, } httpExceptions = { '422': ExchangeError, '418': DDoSProtection, '429': DDoSProtection, '404': ExchangeNotAvailable, '409': ExchangeNotAvailable, '500': ExchangeNotAvailable, '501': ExchangeNotAvailable, '502': ExchangeNotAvailable, '520': ExchangeNotAvailable, '521': ExchangeNotAvailable, '522': ExchangeNotAvailable, '525': ExchangeNotAvailable, '526': ExchangeNotAvailable, '400': ExchangeNotAvailable, '403': ExchangeNotAvailable, '405': ExchangeNotAvailable, '503': ExchangeNotAvailable, '530': ExchangeNotAvailable, '408': RequestTimeout, '504': RequestTimeout, '401': AuthenticationError, '511': AuthenticationError, } headers = None balance = None orderbooks = None orders = None trades = None transactions = None currencies = None options = None # Python does not allow to define properties in run-time with setattr accounts = None requiredCredentials = { 'apiKey': True, 'secret': True, 'uid': False, 'login': False, 'password': False, 'twofa': False, # 2-factor authentication (one-time password key) 'privateKey': False, # a "0x"-prefixed hexstring private key for a wallet 'walletAddress': False, # the wallet address "0x"-prefixed hexstring 'token': False, # reserved for HTTP auth in some cases } # API method metainfo has = { 'cancelAllOrders': False, 'cancelOrder': True, 'cancelOrders': False, 'CORS': False, 'createDepositAddress': False, 'createLimitOrder': True, 'createMarketOrder': True, 'createOrder': True, 'deposit': False, 'editOrder': 'emulated', 'fetchBalance': True, 'fetchClosedOrders': False, 'fetchCurrencies': False, 'fetchDepositAddress': False, 'fetchDeposits': False, 'fetchFundingFees': False, 'fetchL2OrderBook': True, 'fetchLedger': False, 'fetchMarkets': True, 'fetchMyTrades': False, 'fetchOHLCV': 'emulated', 'fetchOpenOrders': False, 'fetchOrder': False, 'fetchOrderBook': True, 'fetchOrderBooks': False, 'fetchOrders': False, 'fetchTicker': True, 'fetchTickers': False, 'fetchTrades': True, 'fetchTradingFee': False, 'fetchTradingFees': False, 'fetchFundingFee': False, 'fetchFundingFees': False, 'fetchTradingLimits': False, 'fetchTransactions': False, 'fetchWithdrawals': False, 'privateAPI': True, 'publicAPI': True, 'withdraw': False, } precisionMode = DECIMAL_PLACES minFundingAddressLength = 1 # used in check_address substituteCommonCurrencyCodes = True lastRestRequestTimestamp = 0 lastRestPollTimestamp = 0 restRequestQueue = None restPollerLoopIsRunning = False rateLimitTokens = 16 rateLimitMaxTokens = 16 rateLimitUpdateTime = 0 enableLastHttpResponse = True enableLastJsonResponse = True enableLastResponseHeaders = True last_http_response = None last_json_response = None last_response_headers = None requiresWeb3 = False web3 = None commonCurrencies = { 'XBT': 'BTC', 'BCC': 'BCH', 'DRK': 'DASH', 'BCHABC': 'BCH', 'BCHSV': 'BSV', } def __init__(self, config={}): self.precision = dict() if self.precision is None else self.precision self.limits = dict() if self.limits is None else self.limits self.exceptions = dict() if self.exceptions is None else self.exceptions self.headers = 
dict() if self.headers is None else self.headers self.balance = dict() if self.balance is None else self.balance self.orderbooks = dict() if self.orderbooks is None else self.orderbooks self.orders = dict() if self.orders is None else self.orders self.trades = dict() if self.trades is None else self.trades self.transactions = dict() if self.transactions is None else self.transactions self.currencies = dict() if self.currencies is None else self.currencies self.options = dict() if self.options is None else self.options # Python does not allow to define properties in run-time with setattr self.decimal_to_precision = decimal_to_precision self.number_to_string = number_to_string # version = '.'.join(map(str, sys.version_info[:3])) # self.userAgent = { # 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version # } self.userAgent = default_user_agent() settings = self.deep_extend(self.describe(), config) for key in settings: if hasattr(self, key) and isinstance(getattr(self, key), dict): setattr(self, key, self.deep_extend(getattr(self, key), settings[key])) else: setattr(self, key, settings[key]) if self.api: self.define_rest_api(self.api, 'request') if self.markets: self.set_markets(self.markets) # convert all properties from underscore notation foo_bar to camelcase notation fooBar cls = type(self) for name in dir(self): if name[0] != '_' and name[-1] != '_' and '_' in name: parts = name.split('_') camelcase = parts[0] + ''.join(self.capitalize(i) for i in parts[1:]) attr = getattr(self, name) if isinstance(attr, types.MethodType): setattr(cls, camelcase, getattr(cls, name)) else: setattr(self, camelcase, attr) self.tokenBucket = self.extend({ 'refillRate': 1.0 / self.rateLimit, 'delay': 0.001, 'capacity': 1.0, 'defaultCost': 1.0, }, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {}) self.session = self.session if self.session else Session() self.logger = self.logger if self.logger else logging.getLogger(__name__) if self.requiresWeb3 and Web3 and not self.web3: # self.web3 = w3 if w3 else Web3(HTTPProvider()) self.web3 = Web3(HTTPProvider()) def __del__(self): if self.session: self.session.close() def describe(self): return {} def set_sandbox_mode(self, enabled): if enabled: if 'test' in self.urls: self.urls['api_backup'] = self.urls['api'] self.urls['api'] = self.urls['test'] else: raise NotSupported(self.id + ' does not have a sandbox URL') elif 'api_backup' in self.urls: self.urls['api'] = self.urls['api_backup'] del self.urls['api_backup'] @classmethod def define_rest_api(cls, api, method_name, options={}): delimiters = re.compile('[^a-zA-Z0-9]') entry = getattr(cls, method_name) # returns a function (instead of a bound method) for api_type, methods in api.items(): for http_method, urls in methods.items(): for url in urls: url = url.strip() split_path = delimiters.split(url) uppercase_method = http_method.upper() lowercase_method = http_method.lower() camelcase_method = lowercase_method.capitalize() camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path]) lowercase_path = [x.strip().lower() for x in split_path] underscore_suffix = '_'.join([k for k in lowercase_path if len(k)]) camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix) underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower() if 'suffixes' in options: if 'camelcase' in options['suffixes']: camelcase += options['suffixes']['camelcase'] if 'underscore' in options['suffixes']: underscore += 
options['suffixes']['underscore'] def partialer(): outer_kwargs = {'path': url, 'api': api_type, 'method': uppercase_method} # MASKED: inner function (lines 396-406) return inner to_bind = partialer() setattr(cls, camelcase, to_bind) setattr(cls, underscore, to_bind) def raise_error(self, exception_type, url=None, method=None, error=None, details=None): if error: error = str(error) output = ' '.join([self.id] + [var for var in (url, method, error, details) if var is not None]) raise exception_type(output) def throttle(self): now = float(self.milliseconds()) elapsed = now - self.lastRestRequestTimestamp if elapsed < self.rateLimit: delay = self.rateLimit - elapsed time.sleep(delay / 1000.0) def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None): """A better wrapper over request for deferred signing""" if self.enableRateLimit: self.throttle() self.lastRestRequestTimestamp = self.milliseconds() request = self.sign(path, api, method, params, headers, body) return self.fetch(request['url'], request['method'], request['headers'], request['body']) def request(self, path, api='public', method='GET', params={}, headers=None, body=None): """Exchange.request is the entry point for all generated methods""" return self.fetch2(path, api, method, params, headers, body) @staticmethod def gzip_deflate(response, text): encoding = response.info().get('Content-Encoding') if encoding in ('gzip', 'x-gzip', 'deflate'): if encoding == 'deflate': return zlib.decompress(text, -zlib.MAX_WBITS) else: return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read() return text def find_broadly_matched_key(self, broad, string): """A helper method for matching error strings exactly vs broadly""" keys = list(broad.keys()) for i in range(0, len(keys)): key = keys[i] if string.find(key) >= 0: return key return None def handle_errors(self, code, reason, url, method, headers, body, response): pass def prepare_request_headers(self, headers=None): headers = headers or {} headers.update(self.headers) if self.userAgent: if type(self.userAgent) is str: headers.update({'User-Agent': self.userAgent}) elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent): headers.update(self.userAgent) if self.proxy: headers.update({'Origin': self.origin}) headers.update({'Accept-Encoding': 'gzip, deflate'}) return headers def fetch(self, url, method='GET', headers=None, body=None): """Perform a HTTP request and return decoded JSON data""" request_headers = self.prepare_request_headers(headers) url = self.proxy + url if self.verbose: print("\nRequest:", method, url, request_headers, body) self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body) if body: body = body.encode() self.session.cookies.clear() response = None http_response = None json_response = None try: response = self.session.request( method, url, data=body, headers=request_headers, timeout=int(self.timeout / 1000), proxies=self.proxies ) http_response = response.text json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None headers = response.headers # FIXME remove last_x_responses from subclasses if self.enableLastHttpResponse: self.last_http_response = http_response if self.enableLastJsonResponse: self.last_json_response = json_response if self.enableLastResponseHeaders: self.last_response_headers = headers if self.verbose: print("\nResponse:", method, url, response.status_code, headers, http_response) self.logger.debug("%s %s, Response: %s %s %s", method, url, 
response.status_code, headers, http_response) response.raise_for_status() except Timeout as e: self.raise_error(RequestTimeout, method, url, e) except TooManyRedirects as e: self.raise_error(ExchangeError, url, method, e) except SSLError as e: self.raise_error(ExchangeError, url, method, e) except HTTPError as e: self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_errors(e, response.status_code, http_response, url, method) self.raise_error(ExchangeError, url, method, e, http_response) except RequestException as e: # base exception class error_string = str(e) if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string): self.raise_error(NetworkError, url, method, e) else: self.raise_error(ExchangeError, url, method, e) self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_response(http_response, json_response, url, method, headers, body) if json_response is not None: return json_response return http_response def handle_rest_errors(self, exception, http_status_code, response, url, method='GET'): error = None string_code = str(http_status_code) if string_code in self.httpExceptions: error = self.httpExceptions[string_code] if error == ExchangeNotAvailable: if re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE): error = DDoSProtection if error: self.raise_error(error, url, method, exception if exception else http_status_code, response) def handle_rest_response(self, response, json_response, url, method='GET', headers=None, body=None): if self.is_json_encoded_object(response) and json_response is None: ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE) exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE) if ddos_protection: self.raise_error(DDoSProtection, method, url, None, response) if exchange_not_available: message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect' self.raise_error(ExchangeNotAvailable, method, url, None, message) self.raise_error(ExchangeError, method, url, ValueError('failed to decode json'), response) def parse_json(self, http_response): try: if Exchange.is_json_encoded_object(http_response): return json.loads(http_response) except ValueError: # superclass of JsonDecodeError (python2) pass @staticmethod def safe_float(dictionary, key, default_value=None): value = default_value try: if isinstance(dictionary, list) and isinstance(key, int) and len(dictionary) > key: value = float(dictionary[key]) else: value = float(dictionary[key]) if (key is not None) and (key in dictionary) and (dictionary[key] is not None) else default_value except ValueError as e: value = default_value return value @staticmethod def safe_string(dictionary, key, default_value=None): return str(dictionary[key]) if key is not None and (key in dictionary) and dictionary[key] is not None else default_value @staticmethod def safe_integer(dictionary, key, default_value=None): if key is None or (key not in dictionary): return default_value value = dictionary[key] if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()): return int(value) return default_value @staticmethod def safe_value(dictionary, key, default_value=None): return dictionary[key] if key is not None and (key in 
dictionary) and dictionary[key] is not None else default_value # we're not using safe_floats with a list argument as we're trying to save some cycles here # we're not using safe_float_3 either because those cases are too rare to deserve their own optimization @staticmethod def safe_float_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value) @staticmethod def safe_string_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value) @staticmethod def safe_integer_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value) @staticmethod def safe_value_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value) @staticmethod def safe_either(method, dictionary, key1, key2, default_value=None): """A helper-wrapper for the safe_value_2() family.""" value = method(dictionary, key1) return value if value is not None else method(dictionary, key2, default_value) @staticmethod def truncate(num, precision=0): """Deprecated, use decimal_to_precision instead""" if precision > 0: decimal_precision = math.pow(10, precision) return math.trunc(num * decimal_precision) / decimal_precision return int(Exchange.truncate_to_string(num, precision)) @staticmethod def truncate_to_string(num, precision=0): """Deprecated, todo: remove references from subclasses""" if precision > 0: parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.') decimal_digits = parts[1][:precision].rstrip('0') decimal_digits = decimal_digits if len(decimal_digits) else '0' return parts[0] + '.' 
+ decimal_digits return ('%d' % num) @staticmethod def uuid(): return str(uuid.uuid4()) @staticmethod def capitalize(string): # first character only, rest characters unchanged # the native pythonic .capitalize() method lowercases all other characters # which is an unwanted behaviour, therefore we use this custom implementation # check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize()) if len(string) > 1: return "%s%s" % (string[0].upper(), string[1:]) return string.upper() @staticmethod def keysort(dictionary): return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0])) @staticmethod def extend(*args): if args is not None: result = None if type(args[0]) is collections.OrderedDict: result = collections.OrderedDict() else: result = {} for arg in args: result.update(arg) return result return {} @staticmethod def deep_extend(*args): result = None for arg in args: if isinstance(arg, dict): if not isinstance(result, dict): result = {} for key in arg: result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key]) else: result = arg return result @staticmethod def filter_by(array, key, value=None): if value: grouped = Exchange.group_by(array, key) if value in grouped: return grouped[value] return [] return array @staticmethod def filterBy(self, array, key, value=None): return Exchange.filter_by(array, key, value) @staticmethod def group_by(array, key): result = {} array = Exchange.to_array(array) array = [entry for entry in array if (key in entry) and (entry[key] is not None)] for entry in array: if entry[key] not in result: result[entry[key]] = [] result[entry[key]].append(entry) return result @staticmethod def groupBy(array, key): return Exchange.group_by(array, key) @staticmethod def index_by(array, key): result = {} if type(array) is dict: array = Exchange.keysort(array).values() for element in array: if (key in element) and (element[key] is not None): k = element[key] result[k] = element return result @staticmethod def sort_by(array, key, descending=False): return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending) @staticmethod def array_concat(a, b): return a + b @staticmethod def in_array(needle, haystack): return needle in haystack @staticmethod def is_empty(object): return not object @staticmethod def extract_params(string): return re.findall(r'{([\w-]+)}', string) @staticmethod def implode_params(string, params): for key in params: string = string.replace('{' + key + '}', str(params[key])) return string @staticmethod def url(path, params={}): result = Exchange.implode_params(path, params) query = Exchange.omit(params, Exchange.extract_params(path)) if query: result += '?' 
+ _urlencode.urlencode(query) return result @staticmethod def urlencode(params={}): if (type(params) is dict) or isinstance(params, collections.OrderedDict): return _urlencode.urlencode(params) return params @staticmethod def rawencode(params={}): return _urlencode.unquote(Exchange.urlencode(params)) @staticmethod def encode_uri_component(uri): return _urlencode.quote(uri, safe="~()*!.'") @staticmethod def omit(d, *args): result = d.copy() for arg in args: if type(arg) is list: for key in arg: if key in result: del result[key] else: if arg in result: del result[arg] return result @staticmethod def unique(array): return list(set(array)) @staticmethod def pluck(array, key): return [ element[key] for element in array if (key in element) and (element[key] is not None) ] @staticmethod def sum(*args): return sum([arg for arg in args if isinstance(arg, (float, int))]) @staticmethod def ordered(array): return collections.OrderedDict(array) @staticmethod def aggregate(bidasks): ordered = Exchange.ordered({}) for [price, volume] in bidasks: if volume > 0: ordered[price] = (ordered[price] if price in ordered else 0) + volume result = [] items = list(ordered.items()) for price, volume in items: result.append([price, volume]) return result @staticmethod def sec(): return Exchange.seconds() @staticmethod def msec(): return Exchange.milliseconds() @staticmethod def usec(): return Exchange.microseconds() @staticmethod def seconds(): return int(time.time()) @staticmethod def milliseconds(): return int(time.time() * 1000) @staticmethod def microseconds(): return int(time.time() * 1000000) @staticmethod def iso8601(timestamp=None): if timestamp is None: return timestamp if not isinstance(timestamp, (int, long)): return None if int(timestamp) < 0: return None try: utc = datetime.datetime.utcfromtimestamp(timestamp // 1000) return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z' except (TypeError, OverflowError, OSError): return None @staticmethod def dmy(timestamp, infix='-'): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y') @staticmethod def ymd(timestamp, infix='-'): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d') @staticmethod def ymdhms(timestamp, infix=' '): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S') @staticmethod def parse_date(timestamp=None): if timestamp is None: return timestamp if not isinstance(timestamp, str): return None if 'GMT' in timestamp: try: string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z' dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ") return calendar.timegm(dt.utctimetuple()) * 1000 except (TypeError, OverflowError, OSError): return None else: return Exchange.parse8601(timestamp) @staticmethod def parse8601(timestamp=None): if timestamp is None: return timestamp yyyy = '([0-9]{4})-?' mm = '([0-9]{2})-?' dd = '([0-9]{2})(?:T|[\\s])?' h = '([0-9]{2}):?' m = '([0-9]{2}):?' s = '([0-9]{2})' ms = '(\\.[0-9]{1,3})?' tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?' 
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz try: match = re.search(regex, timestamp, re.IGNORECASE) if match is None: return None yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups() ms = ms or '.000' msint = int(ms[1:]) sign = sign or '' sign = int(sign + '1') * -1 hours = int(hours or 0) * sign minutes = int(minutes or 0) * sign offset = datetime.timedelta(hours=hours, minutes=minutes) string = yyyy + mm + dd + h + m + s + ms + 'Z' dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ") dt = dt + offset return calendar.timegm(dt.utctimetuple()) * 1000 + msint except (TypeError, OverflowError, OSError, ValueError): return None @staticmethod def hash(request, algorithm='md5', digest='hex'): h = hashlib.new(algorithm, request) if digest == 'hex': return h.hexdigest() elif digest == 'base64': return base64.b64encode(h.digest()) return h.digest() @staticmethod def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'): h = hmac.new(secret, request, algorithm) if digest == 'hex': return h.hexdigest() elif digest == 'base64': return base64.b64encode(h.digest()) return h.digest() @staticmethod def binary_concat(*args): result = bytes() for arg in args: result = result + arg return result @staticmethod def binary_to_string(s): return s.decode('ascii') @staticmethod def base64urlencode(s): return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '') @staticmethod def jwt(request, secret, algorithm=hashlib.sha256, alg='HS256'): header = Exchange.encode(Exchange.json({ 'alg': alg, 'typ': 'JWT', })) encodedHeader = Exchange.base64urlencode(header) encodedData = Exchange.base64urlencode(Exchange.encode(Exchange.json(request))) token = encodedHeader + '.' + encodedData hmac = Exchange.hmac(Exchange.encode(token), Exchange.encode(secret), algorithm, 'binary') signature = Exchange.base64urlencode(hmac) return token + '.' 
+ signature @staticmethod def unjson(input): return json.loads(input) @staticmethod def json(data, params=None): return json.dumps(data, separators=(',', ':')) @staticmethod def is_json_encoded_object(input): return (isinstance(input, basestring) and (len(input) >= 2) and ((input[0] == '{') or (input[0] == '['))) @staticmethod def encode(string): return string.encode() @staticmethod def decode(string): return string.decode() @staticmethod def to_array(value): return list(value.values()) if type(value) is dict else value def nonce(self): return Exchange.seconds() def check_required_credentials(self, error=True): keys = list(self.requiredCredentials.keys()) for key in keys: if self.requiredCredentials[key] and not getattr(self, key): if error: self.raise_error(AuthenticationError, details='requires `' + key + '`') else: return error def check_address(self, address): """Checks an address is not the same character repeated or an empty sequence""" if address is None: self.raise_error(InvalidAddress, details='address is None') if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address: self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"') return address def account(self): return { 'free': 0.0, 'used': 0.0, 'total': 0.0, } def common_currency_code(self, currency): if not self.substituteCommonCurrencyCodes: return currency return self.safe_string(self.commonCurrencies, currency, currency) def currency_id(self, commonCode): if self.currencies: if commonCode in self.currencies: return self.currencies[commonCode]['id'] currencyIds = {v: k for k, v in self.commonCurrencies.items()} return self.safe_string(currencyIds, commonCode, commonCode) def precision_from_string(self, string): parts = re.sub(r'0+$', '', string).split('.') return len(parts[1]) if len(parts) > 1 else 0 def cost_to_precision(self, symbol, cost): return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode) def price_to_precision(self, symbol, price): return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode) def amount_to_precision(self, symbol, amount): return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode) def fee_to_precision(self, symbol, fee): return self.decimal_to_precision(fee, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode) def currency_to_precision(self, currency, fee): return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode) def set_markets(self, markets, currencies=None): values = list(markets.values()) if type(markets) is dict else markets for i in range(0, len(values)): values[i] = self.extend( self.fees['trading'], {'precision': self.precision, 'limits': self.limits}, values[i] ) self.markets = self.index_by(values, 'symbol') self.markets_by_id = self.index_by(values, 'id') self.marketsById = self.markets_by_id self.symbols = sorted(list(self.markets.keys())) self.ids = sorted(list(self.markets_by_id.keys())) if currencies: self.currencies = self.deep_extend(currencies, self.currencies) else: base_currencies = [{ 'id': market['baseId'] if 'baseId' in market else market['base'], 'numericId': market['baseNumericId'] if 'baseNumericId' in market else None, 'code': market['base'], 'precision': ( market['precision']['base'] if 'base' in 
market['precision'] else ( market['precision']['amount'] if 'amount' in market['precision'] else None ) ) if 'precision' in market else 8, } for market in values if 'base' in market] quote_currencies = [{ 'id': market['quoteId'] if 'quoteId' in market else market['quote'], 'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None, 'code': market['quote'], 'precision': ( market['precision']['quote'] if 'quote' in market['precision'] else ( market['precision']['price'] if 'price' in market['precision'] else None ) ) if 'precision' in market else 8, } for market in values if 'quote' in market] currencies = self.sort_by(base_currencies + quote_currencies, 'code') self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies) self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id') return self.markets def load_markets(self, reload=False, params={}): if not reload: if self.markets: if not self.markets_by_id: return self.set_markets(self.markets) return self.markets currencies = None if self.has['fetchCurrencies']: currencies = self.fetch_currencies() markets = self.fetch_markets(params) return self.set_markets(markets, currencies) def load_accounts(self, reload=False, params={}): if reload: self.accounts = self.fetch_accounts(params) else: if self.accounts: return self.accounts else: self.accounts = self.fetch_accounts(params) self.accountsById = self.index_by(self.accounts, 'id') return self.accounts def load_fees(self, reload=False): if not reload: if self.loaded_fees != Exchange.loaded_fees: return self.loaded_fees self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees()) return self.loaded_fees def fetch_markets(self, params={}): # markets are returned as a list # currencies are returned as a dict # this is for historical reasons # and may be changed for consistency later return self.to_array(self.markets) def fetch_currencies(self, params={}): # markets are returned as a list # currencies are returned as a dict # this is for historical reasons # and may be changed for consistency later return self.currencies def fetch_fees(self): trading = {} funding = {} if self.has['fetchTradingFees']: trading = self.fetch_trading_fees() if self.has['fetchFundingFees']: funding = self.fetch_funding_fees() return { 'trading': trading, 'funding': funding, } def create_order(self, symbol, type, side, amount, price=None, params={}): self.raise_error(NotSupported, details='create_order() not supported yet') def cancel_order(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='cancel_order() not supported yet') def fetch_bids_asks(self, symbols=None, params={}): self.raise_error(NotSupported, details='API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now') def fetch_tickers(self, symbols=None, params={}): self.raise_error(NotSupported, details='API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now') def fetch_order_status(self, id, symbol=None, params={}): order = self.fetch_order(id, symbol, params) return order['status'] def purge_cached_orders(self, before): orders = self.to_array(self.orders) orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)] self.orders = self.index_by(orders, 'id') return self.orders def fetch_order(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='fetch_order() is not supported yet') def fetch_orders(self, symbol=None, since=None, 
limit=None, params={}): self.raise_error(NotSupported, details='fetch_orders() is not supported yet') def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_open_orders() is not supported yet') def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_closed_orders() is not supported yet') def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_my_trades() is not supported yet') def fetch_order_trades(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='fetch_order_trades() is not supported yet') def fetch_transactions(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_transactions() is not supported yet') def fetch_deposits(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_deposits() is not supported yet') def fetch_withdrawals(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_withdrawals() is not supported yet') def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None): return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None): ohlcvs = self.to_array(ohlcvs) num_ohlcvs = len(ohlcvs) result = [] i = 0 while i < num_ohlcvs: if limit and (len(result) >= limit): break ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit) i = i + 1 if since and (ohlcv[0] < since): continue result.append(ohlcv) return self.sort_by(result, 0) def parse_bid_ask(self, bidask, price_key=0, amount_key=0): return [float(bidask[price_key]), float(bidask[amount_key])] def parse_bids_asks(self, bidasks, price_key=0, amount_key=1): result = [] if len(bidasks): if type(bidasks[0]) is list: for bidask in bidasks: if bidask[price_key] and bidask[amount_key]: result.append(self.parse_bid_ask(bidask, price_key, amount_key)) elif type(bidasks[0]) is dict: for bidask in bidasks: if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]): result.append(self.parse_bid_ask(bidask, price_key, amount_key)) else: self.raise_error(ExchangeError, details='unrecognized bidask format: ' + str(bidasks[0])) return result def fetch_l2_order_book(self, symbol, limit=None, params={}): orderbook = self.fetch_order_book(symbol, limit, params) return self.extend(orderbook, { 'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True), 'asks': self.sort_by(self.aggregate(orderbook['asks']), 0), }) def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1): return { 'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True), 'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0), 'timestamp': timestamp, 'datetime': self.iso8601(timestamp) if timestamp is not None else None, 'nonce': None, } def parse_balance(self, balance): currencies = self.omit(balance, 'info').keys() for account in ['free', 'used', 'total']: balance[account] = {} for currency in currencies: balance[account][currency] = balance[currency][account] return balance def 
fetch_partial_balance(self, part, params={}): balance = self.fetch_balance(params) return balance[part] def fetch_free_balance(self, params={}): return self.fetch_partial_balance('free', params) def fetch_used_balance(self, params={}): return self.fetch_partial_balance('used', params) def fetch_total_balance(self, params={}): return self.fetch_partial_balance('total', params) def fetch_trading_fees(self, symbol, params={}): self.raise_error(NotSupported, details='fetch_trading_fees() not supported yet') def fetch_trading_fee(self, symbol, params={}): if not self.has['fetchTradingFees']: self.raise_error(NotSupported, details='fetch_trading_fee() not supported yet') return self.fetch_trading_fees(params) def fetch_funding_fees(self, params={}): self.raise_error(NotSupported, details='fetch_funding_fees() not supported yet') def fetch_funding_fee(self, code, params={}): if not self.has['fetchFundingFees']: self.raise_error(NotSupported, details='fetch_funding_fee() not supported yet') return self.fetch_funding_fees(params) def load_trading_limits(self, symbols=None, reload=False, params={}): if self.has['fetchTradingLimits']: if reload or not('limitsLoaded' in list(self.options.keys())): response = self.fetch_trading_limits(symbols) for i in range(0, len(symbols)): symbol = symbols[i] self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol]) self.options['limitsLoaded'] = self.milliseconds() return self.markets def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): if not self.has['fetchTrades']: self.raise_error(NotSupported, details='fetch_ohlcv() not supported yet') self.load_markets() trades = self.fetch_trades(symbol, since, limit, params) return self.build_ohlcv(trades, timeframe, since, limit) def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}): return self.fetch_ohlcv(symbol, timeframe, since, limit, params) def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None): result = self.convert_trading_view_to_ohlcv(ohlcvs) return self.parse_ohlcvs(result, market, timeframe, since, limit) def convert_trading_view_to_ohlcv(self, ohlcvs): result = [] for i in range(0, len(ohlcvs['t'])): result.append([ ohlcvs['t'][i] * 1000, ohlcvs['o'][i], ohlcvs['h'][i], ohlcvs['l'][i], ohlcvs['c'][i], ohlcvs['v'][i], ]) return result def convert_ohlcv_to_trading_view(self, ohlcvs): result = { 't': [], 'o': [], 'h': [], 'l': [], 'c': [], 'v': [], } for i in range(0, len(ohlcvs)): result['t'].append(int(ohlcvs[i][0] / 1000)) result['o'].append(ohlcvs[i][1]) result['h'].append(ohlcvs[i][2]) result['l'].append(ohlcvs[i][3]) result['c'].append(ohlcvs[i][4]) result['v'].append(ohlcvs[i][5]) return result def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None): ms = self.parse_timeframe(timeframe) * 1000 ohlcvs = [] (high, low, close, volume) = (2, 3, 4, 5) num_trades = len(trades) oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit) for i in range(0, oldest): trade = trades[i] if (since is not None) and (trade['timestamp'] < since): continue opening_time = int(math.floor(trade['timestamp'] / ms) * ms) # Shift the edge of the m/h/d (but not M) j = len(ohlcvs) if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms: # moved to a new timeframe -> create a new candle from opening trade ohlcvs.append([ opening_time, trade['price'], trade['price'], trade['price'], trade['price'], trade['amount'], ]) else: # still processing the same timeframe -> update opening trade 
ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price']) ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price']) ohlcvs[j - 1][close] = trade['price'] ohlcvs[j - 1][volume] += trade['amount'] return ohlcvs @staticmethod def parse_timeframe(timeframe): amount = int(timeframe[0:-1]) unit = timeframe[-1] if 'y' in unit: scale = 60 * 60 * 24 * 365 elif 'M' in unit: scale = 60 * 60 * 24 * 30 elif 'w' in unit: scale = 60 * 60 * 24 * 7 elif 'd' in unit: scale = 60 * 60 * 24 elif 'h' in unit: scale = 60 * 60 else: scale = 60 # 1m by default return amount * scale def parse_trades(self, trades, market=None, since=None, limit=None): array = self.to_array(trades) array = [self.parse_trade(trade, market) for trade in array] array = self.sort_by(array, 'timestamp') symbol = market['symbol'] if market else None return self.filter_by_symbol_since_limit(array, symbol, since, limit) def parse_ledger(self, data, currency=None, since=None, limit=None): array = self.to_array(data) array = [self.parse_ledger_entry(item, currency) for item in array] array = self.sort_by(array, 'timestamp') code = currency['code'] if currency else None return self.filter_by_currency_since_limit(array, code, since, limit) def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}): array = self.to_array(transactions) array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array] array = self.sort_by(array, 'timestamp') code = currency['code'] if currency else None return self.filter_by_currency_since_limit(array, code, since, limit) def parse_orders(self, orders, market=None, since=None, limit=None): array = self.to_array(orders) array = [self.parse_order(order, market) for order in array] array = self.sort_by(array, 'timestamp') symbol = market['symbol'] if market else None return self.filter_by_symbol_since_limit(array, symbol, since, limit) def safe_currency_code(self, data, key, currency=None): code = None currency_id = self.safe_string(data, key) if currency_id in self.currencies_by_id: currency = self.currencies_by_id[currency_id] else: code = self.common_currency_code(currency_id) if currency is not None: code = currency['code'] return code def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None): array = self.to_array(array) if value: array = [entry for entry in array if entry[field] == value] if since: array = [entry for entry in array if entry['timestamp'] >= since] if limit: array = array[0:limit] return array def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None): return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit) def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None): return self.filter_by_value_since_limit(array, 'currency', code, since, limit) def filter_by_since_limit(self, array, since=None, limit=None): array = self.to_array(array) if since: array = [entry for entry in array if entry['timestamp'] >= since] if limit: array = array[0:limit] return array def filter_by_symbol(self, array, symbol=None): array = self.to_array(array) if symbol: return [entry for entry in array if entry['symbol'] == symbol] return array def filter_by_array(self, objects, key, values=None, indexed=True): objects = self.to_array(objects) # return all of them if no values were passed in if values is None: return self.index_by(objects, key) if indexed else objects result = [] for i in range(0, len(objects)): value = objects[i][key] if key in 
objects[i] else None if value in values: result.append(objects[i]) return self.index_by(result, key) if indexed else result def currency(self, code): if not self.currencies: self.raise_error(ExchangeError, details='Currencies not loaded') if isinstance(code, basestring) and (code in self.currencies): return self.currencies[code] self.raise_error(ExchangeError, details='Does not have currency code ' + str(code)) def find_market(self, string): if not self.markets: self.raise_error(ExchangeError, details='Markets not loaded') if isinstance(string, basestring): if string in self.markets_by_id: return self.markets_by_id[string] if string in self.markets: return self.markets[string] return string def find_symbol(self, string, market=None): if market is None: market = self.find_market(string) if isinstance(market, dict): return market['symbol'] return string def market(self, symbol): if not self.markets: self.raise_error(ExchangeError, details='Markets not loaded') if isinstance(symbol, basestring) and (symbol in self.markets): return self.markets[symbol] self.raise_error(ExchangeError, details='No market symbol ' + str(symbol)) def market_ids(self, symbols): return [self.market_id(symbol) for symbol in symbols] def market_id(self, symbol): market = self.market(symbol) return market['id'] if type(market) is dict else symbol def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}): market = self.markets[symbol] rate = market[takerOrMaker] cost = float(self.cost_to_precision(symbol, amount * price)) return { 'rate': rate, 'type': takerOrMaker, 'currency': market['quote'], 'cost': float(self.fee_to_precision(symbol, rate * cost)), } def edit_limit_buy_order(self, id, symbol, *args): return self.edit_limit_order(id, symbol, 'buy', *args) def edit_limit_sell_order(self, id, symbol, *args): return self.edit_limit_order(id, symbol, 'sell', *args) def edit_limit_order(self, id, symbol, *args): return self.edit_order(id, symbol, 'limit', *args) def edit_order(self, id, symbol, *args): if not self.enableRateLimit: self.raise_error(ExchangeError, details='edit_order() requires enableRateLimit = true') self.cancel_order(id, symbol) return self.create_order(symbol, *args) def create_limit_order(self, symbol, *args): return self.create_order(symbol, 'limit', *args) def create_market_order(self, symbol, *args): return self.create_order(symbol, 'market', *args) def create_limit_buy_order(self, symbol, *args): return self.create_order(symbol, 'limit', 'buy', *args) def create_limit_sell_order(self, symbol, *args): return self.create_order(symbol, 'limit', 'sell', *args) def create_market_buy_order(self, symbol, amount, params={}): return self.create_order(symbol, 'market', 'buy', amount, None, params) def create_market_sell_order(self, symbol, amount, params={}): return self.create_order(symbol, 'market', 'sell', amount, None, params) def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes') # ------------------------------------------------------------------------- # web3 / 0x methods @staticmethod def has_web3(): return Web3 is not None def check_required_dependencies(self): if not Exchange.has_web3(): raise NotSupported("Web3 functionality requires Python3 and web3 package installed: https://github.com/ethereum/web3.py") def eth_decimals(self, unit='ether'): units = { 'wei': 0, # 1 'kwei': 3, # 1000 'babbage': 3, # 1000 'femtoether': 3, # 1000 'mwei': 6, # 
1000000 'lovelace': 6, # 1000000 'picoether': 6, # 1000000 'gwei': 9, # 1000000000 'shannon': 9, # 1000000000 'nanoether': 9, # 1000000000 'nano': 9, # 1000000000 'szabo': 12, # 1000000000000 'microether': 12, # 1000000000000 'micro': 12, # 1000000000000 'finney': 15, # 1000000000000000 'milliether': 15, # 1000000000000000 'milli': 15, # 1000000000000000 'ether': 18, # 1000000000000000000 'kether': 21, # 1000000000000000000000 'grand': 21, # 1000000000000000000000 'mether': 24, # 1000000000000000000000000 'gether': 27, # 1000000000000000000000000000 'tether': 30, # 1000000000000000000000000000000 } return self.safe_value(units, unit) def eth_unit(self, decimals=18): units = { 0: 'wei', # 1000000000000000000 3: 'kwei', # 1000000000000000 6: 'mwei', # 1000000000000 9: 'gwei', # 1000000000 12: 'szabo', # 1000000 15: 'finney', # 1000 18: 'ether', # 1 21: 'kether', # 0.001 24: 'mether', # 0.000001 27: 'gether', # 0.000000001 30: 'tether', # 0.000000000001 } return self.safe_value(units, decimals) def fromWei(self, amount, unit='ether', decimals=18): if Web3 is None: self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org") if amount is None: return amount if decimals != 18: if decimals % 3: amount = int(amount) * (10 ** (18 - decimals)) else: unit = self.eth_unit(decimals) return float(Web3.fromWei(int(amount), unit)) def toWei(self, amount, unit='ether', decimals=18): if Web3 is None: self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org") if amount is None: return amount if decimals != 18: if decimals % 3: # this case has known yet unsolved problems: # toWei(1.999, 'ether', 17) == '199900000000000011' # toWei(1.999, 'ether', 19) == '19989999999999999991' # the best solution should not involve additional dependencies amount = Decimal(amount) / Decimal(10 ** (18 - decimals)) else: unit = self.eth_unit(decimals) return str(Web3.toWei(amount, unit)) def decryptAccountFromJSON(self, value, password): return self.decryptAccount(json.loads(value) if isinstance(value, basestring) else value, password) def decryptAccount(self, key, password): return self.web3.eth.accounts.decrypt(key, password) def decryptAccountFromPrivateKey(self, privateKey): return self.web3.eth.accounts.privateKeyToAccount(privateKey) def soliditySha3(self, array): values = self.solidityValues(array) types = self.solidityTypes(values) return self.web3.soliditySha3(types, values).hex() def soliditySha256(self, values): types = self.solidityTypes(values) solidity_values = self.solidityValues(values) encoded_values = [hex_encode_abi_type(abi_type, value)[2:] for abi_type, value in zip(types, solidity_values)] hex_string = '0x' + ''.join(encoded_values) return '0x' + self.hash(self.encode(self.web3.toText(hex_string)), 'sha256') def solidityTypes(self, array): return ['address' if self.web3.isAddress(value) else 'uint256' for value in array] def solidityValues(self, array): return [self.web3.toChecksumAddress(value) if self.web3.isAddress(value) else int(value) for value in array] def getZeroExOrderHash2(self, order): return self.soliditySha3([ order['exchangeContractAddress'], # address order['maker'], # address order['taker'], # address order['makerTokenAddress'], # address order['takerTokenAddress'], # address order['feeRecipient'], # address order['makerTokenAmount'], # uint256 order['takerTokenAmount'], # uint256 order['makerFee'], # uint256 order['takerFee'], # uint256 order['expirationUnixTimestampSec'], # uint256 order['salt'], # 
uint256 ]) def getZeroExOrderHash(self, order): unpacked = [ self.web3.toChecksumAddress(order['exchangeContractAddress']), # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['maker']), # { value: order.maker, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['taker']), # { value: order.taker, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['makerTokenAddress']), # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['takerTokenAddress']), # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['feeRecipient']), # { value: order.feeRecipient, type: types_1.SolidityTypes.Address }, int(order['makerTokenAmount']), # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, }, int(order['takerTokenAmount']), # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, }, int(order['makerFee']), # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, }, int(order['takerFee']), # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, }, int(order['expirationUnixTimestampSec']), # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, }, int(order['salt']), # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 }, ] types = [ 'address', # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address }, 'address', # { value: order.maker, type: types_1.SolidityTypes.Address }, 'address', # { value: order.taker, type: types_1.SolidityTypes.Address }, 'address', # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address }, 'address', # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address }, 'address', # { value: order.feeRecipient, type: types_1.SolidityTypes.Address }, 'uint256', # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 }, ] return self.web3.soliditySha3(types, unpacked).hex() def remove_0x_prefix(self, value): if value[:2] == '0x': return value[2:] return value def getZeroExOrderHashV2(self, order): # https://github.com/0xProject/0x-monorepo/blob/development/python-packages/order_utils/src/zero_ex/order_utils/__init__.py def pad_20_bytes_to_32(twenty_bytes): return bytes(12) + twenty_bytes def int_to_32_big_endian_bytes(i): return i.to_bytes(32, byteorder="big") def to_bytes(value): if not isinstance(value, str): raise TypeError("Value must be an instance of str") if len(value) % 2: value = "0x0" + self.remove_0x_prefix(value) return base64.b16decode(self.remove_0x_prefix(value), casefold=True) domain_struct_header = b"\x91\xab=\x17\xe3\xa5\n\x9d\x89\xe6?\xd3\x0b\x92\xbe\x7fS6\xb0;({\xb9Fxz\x83\xa9\xd6*'f\xf0\xf2F\x18\xf4\xc4\xbe\x1eb\xe0&\xfb\x03\x9a 
\xef\x96\xf4IR\x94\x81}\x10'\xff\xaam\x1fp\xe6\x1e\xad|[\xef\x02x\x16\xa8\x00\xda\x176DO\xb5\x8a\x80~\xf4\xc9`;xHg?~:h\xeb\x14\xa5" order_schema_hash = b'w\x05\x01\xf8\x8a&\xed\xe5\xc0J \xef\x87yi\xe9a\xeb\x11\xfc\x13\xb7\x8a\xafAKc=\xa0\xd4\xf8o' header = b"\x19\x01" domain_struct_hash = self.web3.sha3( domain_struct_header + pad_20_bytes_to_32(to_bytes(order["exchangeAddress"])) ) order_struct_hash = self.web3.sha3( order_schema_hash + pad_20_bytes_to_32(to_bytes(order["makerAddress"])) + pad_20_bytes_to_32(to_bytes(order["takerAddress"])) + pad_20_bytes_to_32(to_bytes(order["feeRecipientAddress"])) + pad_20_bytes_to_32(to_bytes(order["senderAddress"])) + int_to_32_big_endian_bytes(int(order["makerAssetAmount"])) + int_to_32_big_endian_bytes(int(order["takerAssetAmount"])) + int_to_32_big_endian_bytes(int(order["makerFee"])) + int_to_32_big_endian_bytes(int(order["takerFee"])) + int_to_32_big_endian_bytes(int(order["expirationTimeSeconds"])) + int_to_32_big_endian_bytes(int(order["salt"])) + self.web3.sha3(to_bytes(order["makerAssetData"])) + self.web3.sha3(to_bytes(order["takerAssetData"])) ) sha3 = self.web3.sha3( header + domain_struct_hash + order_struct_hash ) return '0x' + base64.b16encode(sha3).decode('ascii').lower() def signZeroExOrder(self, order, privateKey): orderHash = self.getZeroExOrderHash(order) signature = self.signMessage(orderHash[-64:], privateKey) return self.extend(order, { 'orderHash': orderHash, 'ecSignature': signature, # todo fix v if needed }) def signZeroExOrderV2(self, order, privateKey): orderHash = self.getZeroExOrderHashV2(order) signature = self.signMessage(orderHash[-64:], privateKey) return self.extend(order, { 'orderHash': orderHash, 'signature': self._convertECSignatureToSignatureHex(signature), }) def _convertECSignatureToSignatureHex(self, signature): # https://github.com/0xProject/0x-monorepo/blob/development/packages/order-utils/src/signature_utils.ts v = signature["v"] if v != 27 and v != 28: v = v + 27 return ( "0x" + self.remove_0x_prefix(hex(v)) + self.remove_0x_prefix(signature["r"]) + self.remove_0x_prefix(signature["s"]) + "03" ) def hashMessage(self, message): message_bytes = bytes.fromhex(message) return self.web3.sha3(b"\x19Ethereum Signed Message:\n" + str(len(message_bytes)).encode() + message_bytes).hex() def signHash(self, hash, privateKey): signature = self.web3.eth.account.signHash(hash[-64:], private_key=privateKey[-64:]) return { 'v': signature.v, # integer 'r': self.web3.toHex(signature.r), # '0x'-prefixed hex string 's': self.web3.toHex(signature.s), # '0x'-prefixed hex string } def signMessage(self, message, privateKey): # # The following comment is related to MetaMask, we use the upper type of signature prefix: # # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f', # '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', { # prefixType: 'ETH_SIGN', # shouldAddPrefixBeforeCallingEthSign: true # }).then ((e, r) => console.log (e,r)) # # { ↓ # v: 28, # r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2", # s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf" # } # # -------------------------------------------------------------------- # # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f', # '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', { # prefixType: 'NONE', # shouldAddPrefixBeforeCallingEthSign: true # }).then ((e, r) => console.log (e,r)) # # { ↓ # v: 27, # r: 
"0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6", # s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394" # } # message_hash = self.hashMessage(message) signature = self.signHash(message_hash[-64:], privateKey[-64:]) return signature def oath(self): if self.twofa is not None: return self.totp(self.twofa) else: raise ExchangeError(self.id + ' set .twofa to use this feature') @staticmethod def totp(key): def dec_to_bytes(n): if n > 0: return dec_to_bytes(n // 256) + bytes([n % 256]) else: return b'' def hex_to_dec(n): return int(n, base=16) def base32_to_bytes(n): missing_padding = len(n) % 8 padding = 8 - missing_padding if missing_padding > 0 else 0 padded = n.upper() + ('=' * padding) return base64.b32decode(padded) # throws an error if the key is invalid epoch = int(time.time()) // 30 hmac_res = Exchange.hmac(dec_to_bytes(epoch).rjust(8, b'\x00'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex') offset = hex_to_dec(hmac_res[-1]) * 2 otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff) return otp[-6:]
@functools.wraps(entry)
def inner(_self, params=None):
    """
    Inner is called when a generated method (publicGetX) is called.
    _self is a reference to self created by function.__get__(exchange, type(exchange))
    https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial
    """
    inner_kwargs = dict(outer_kwargs)  # avoid mutation
    if params is not None:
        inner_kwargs['params'] = params
    return entry(_self, **inner_kwargs)
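For context, this snippet is the closure that define_rest_api (visible in the file content further down) wraps around Exchange.request for every endpoint URL, so that generated methods like publicGetTicker carry their own path/api/method. The toy below is a self-contained sketch of that same pattern, not ccxt's actual generated code; the Toy class and the 'ticker' endpoint are illustrative.

import functools


class Toy:
    def request(self, path, api='public', method='GET', params={}):
        # stands in for Exchange.request, which forwards to fetch2/sign/fetch
        return (method, api, path, params)


def define(cls, api_type, http_method, path):
    entry = cls.request

    def partialer():
        # each generated method captures its own copy of these kwargs
        outer_kwargs = {'path': path, 'api': api_type, 'method': http_method.upper()}

        @functools.wraps(entry)
        def inner(_self, params=None):
            inner_kwargs = dict(outer_kwargs)  # copy, so the shared dict is never mutated
            if params is not None:
                inner_kwargs['params'] = params
            return entry(_self, **inner_kwargs)
        return inner

    setattr(cls, api_type + http_method.capitalize() + path.capitalize(), partialer())


define(Toy, 'public', 'get', 'ticker')
print(Toy().publicGetTicker({'symbol': 'BTC/USDT'}))
# -> ('GET', 'public', 'ticker', {'symbol': 'BTC/USDT'})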
396
406
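Assuming the two integers above are this row's start_line and end_line columns (1-based, inclusive line numbers into the file_content field that follows), the implementation span can be recovered with a slice like the one below; the helper name and variables are illustrative, not part of the dataset tooling.

start_line, end_line = 396, 406

def extract_implementation(file_content: str, start: int, end: int) -> str:
    # returns the lines of file_content covered by [start, end], 1-based and inclusive
    lines = file_content.splitlines()
    return "\n".join(lines[start - 1:end])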
# -*- coding: utf-8 -*- """Base exchange class""" # ----------------------------------------------------------------------------- __version__ = '1.18.575' # ----------------------------------------------------------------------------- from ccxt.base.errors import ExchangeError from ccxt.base.errors import NetworkError from ccxt.base.errors import NotSupported from ccxt.base.errors import AuthenticationError from ccxt.base.errors import DDoSProtection from ccxt.base.errors import RequestTimeout from ccxt.base.errors import ExchangeNotAvailable from ccxt.base.errors import InvalidAddress # ----------------------------------------------------------------------------- from ccxt.base.decimal_to_precision import decimal_to_precision from ccxt.base.decimal_to_precision import DECIMAL_PLACES, TRUNCATE, ROUND from ccxt.base.decimal_to_precision import number_to_string # ----------------------------------------------------------------------------- __all__ = [ 'Exchange', ] # ----------------------------------------------------------------------------- # Python 2 & 3 import types import logging import base64 import calendar import collections import datetime from email.utils import parsedate import functools import gzip import hashlib import hmac import io import json import math from numbers import Number import re from requests import Session from requests.utils import default_user_agent from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException # import socket from ssl import SSLError # import sys import time import uuid import zlib from decimal import Decimal # ----------------------------------------------------------------------------- try: basestring # basestring was removed in Python 3 except NameError: basestring = str try: long # long integer was removed in Python 3 except NameError: long = int # ----------------------------------------------------------------------------- try: import urllib.parse as _urlencode # Python 3 except ImportError: import urllib as _urlencode # Python 2 # ----------------------------------------------------------------------------- # web3/0x imports try: # from web3.auto import w3 from web3 import Web3, HTTPProvider from web3.utils.encoding import hex_encode_abi_type except ImportError: Web3 = HTTPProvider = None # web3/0x not supported in Python 2 # ----------------------------------------------------------------------------- class Exchange(object): """Base exchange class""" id = None version = None certified = False # rate limiter settings enableRateLimit = False rateLimit = 2000 # milliseconds = seconds * 1000 timeout = 10000 # milliseconds = seconds * 1000 asyncio_loop = None aiohttp_proxy = None aiohttp_trust_env = False session = None # Session () by default logger = None # logging.getLogger(__name__) by default userAgent = None userAgents = { 'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36', 'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36', } verbose = False markets = None symbols = None fees = { 'trading': { 'percentage': True, # subclasses should rarely have to redefine this }, 'funding': { 'withdraw': {}, 'deposit': {}, }, } loaded_fees = { 'trading': { 'percentage': True, }, 'funding': { 'withdraw': {}, 'deposit': {}, }, } ids = None tickers = None api = None parseJsonResponse = True proxy = '' origin = '*' # CORS origin proxies = None hostname = None # in case of 
inaccessibility of the "main" domain apiKey = '' secret = '' password = '' uid = '' privateKey = '' # a "0x"-prefixed hexstring private key for a wallet walletAddress = '' # the wallet address "0x"-prefixed hexstring token = '' # reserved for HTTP auth in some cases twofa = None marketsById = None markets_by_id = None currencies_by_id = None precision = None exceptions = None limits = { 'amount': { 'min': None, 'max': None, }, 'price': { 'min': None, 'max': None, }, 'cost': { 'min': None, 'max': None, }, } httpExceptions = { '422': ExchangeError, '418': DDoSProtection, '429': DDoSProtection, '404': ExchangeNotAvailable, '409': ExchangeNotAvailable, '500': ExchangeNotAvailable, '501': ExchangeNotAvailable, '502': ExchangeNotAvailable, '520': ExchangeNotAvailable, '521': ExchangeNotAvailable, '522': ExchangeNotAvailable, '525': ExchangeNotAvailable, '526': ExchangeNotAvailable, '400': ExchangeNotAvailable, '403': ExchangeNotAvailable, '405': ExchangeNotAvailable, '503': ExchangeNotAvailable, '530': ExchangeNotAvailable, '408': RequestTimeout, '504': RequestTimeout, '401': AuthenticationError, '511': AuthenticationError, } headers = None balance = None orderbooks = None orders = None trades = None transactions = None currencies = None options = None # Python does not allow to define properties in run-time with setattr accounts = None requiredCredentials = { 'apiKey': True, 'secret': True, 'uid': False, 'login': False, 'password': False, 'twofa': False, # 2-factor authentication (one-time password key) 'privateKey': False, # a "0x"-prefixed hexstring private key for a wallet 'walletAddress': False, # the wallet address "0x"-prefixed hexstring 'token': False, # reserved for HTTP auth in some cases } # API method metainfo has = { 'cancelAllOrders': False, 'cancelOrder': True, 'cancelOrders': False, 'CORS': False, 'createDepositAddress': False, 'createLimitOrder': True, 'createMarketOrder': True, 'createOrder': True, 'deposit': False, 'editOrder': 'emulated', 'fetchBalance': True, 'fetchClosedOrders': False, 'fetchCurrencies': False, 'fetchDepositAddress': False, 'fetchDeposits': False, 'fetchFundingFees': False, 'fetchL2OrderBook': True, 'fetchLedger': False, 'fetchMarkets': True, 'fetchMyTrades': False, 'fetchOHLCV': 'emulated', 'fetchOpenOrders': False, 'fetchOrder': False, 'fetchOrderBook': True, 'fetchOrderBooks': False, 'fetchOrders': False, 'fetchTicker': True, 'fetchTickers': False, 'fetchTrades': True, 'fetchTradingFee': False, 'fetchTradingFees': False, 'fetchFundingFee': False, 'fetchFundingFees': False, 'fetchTradingLimits': False, 'fetchTransactions': False, 'fetchWithdrawals': False, 'privateAPI': True, 'publicAPI': True, 'withdraw': False, } precisionMode = DECIMAL_PLACES minFundingAddressLength = 1 # used in check_address substituteCommonCurrencyCodes = True lastRestRequestTimestamp = 0 lastRestPollTimestamp = 0 restRequestQueue = None restPollerLoopIsRunning = False rateLimitTokens = 16 rateLimitMaxTokens = 16 rateLimitUpdateTime = 0 enableLastHttpResponse = True enableLastJsonResponse = True enableLastResponseHeaders = True last_http_response = None last_json_response = None last_response_headers = None requiresWeb3 = False web3 = None commonCurrencies = { 'XBT': 'BTC', 'BCC': 'BCH', 'DRK': 'DASH', 'BCHABC': 'BCH', 'BCHSV': 'BSV', } def __init__(self, config={}): self.precision = dict() if self.precision is None else self.precision self.limits = dict() if self.limits is None else self.limits self.exceptions = dict() if self.exceptions is None else self.exceptions self.headers = 
dict() if self.headers is None else self.headers self.balance = dict() if self.balance is None else self.balance self.orderbooks = dict() if self.orderbooks is None else self.orderbooks self.orders = dict() if self.orders is None else self.orders self.trades = dict() if self.trades is None else self.trades self.transactions = dict() if self.transactions is None else self.transactions self.currencies = dict() if self.currencies is None else self.currencies self.options = dict() if self.options is None else self.options # Python does not allow to define properties in run-time with setattr self.decimal_to_precision = decimal_to_precision self.number_to_string = number_to_string # version = '.'.join(map(str, sys.version_info[:3])) # self.userAgent = { # 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version # } self.userAgent = default_user_agent() settings = self.deep_extend(self.describe(), config) for key in settings: if hasattr(self, key) and isinstance(getattr(self, key), dict): setattr(self, key, self.deep_extend(getattr(self, key), settings[key])) else: setattr(self, key, settings[key]) if self.api: self.define_rest_api(self.api, 'request') if self.markets: self.set_markets(self.markets) # convert all properties from underscore notation foo_bar to camelcase notation fooBar cls = type(self) for name in dir(self): if name[0] != '_' and name[-1] != '_' and '_' in name: parts = name.split('_') camelcase = parts[0] + ''.join(self.capitalize(i) for i in parts[1:]) attr = getattr(self, name) if isinstance(attr, types.MethodType): setattr(cls, camelcase, getattr(cls, name)) else: setattr(self, camelcase, attr) self.tokenBucket = self.extend({ 'refillRate': 1.0 / self.rateLimit, 'delay': 0.001, 'capacity': 1.0, 'defaultCost': 1.0, }, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {}) self.session = self.session if self.session else Session() self.logger = self.logger if self.logger else logging.getLogger(__name__) if self.requiresWeb3 and Web3 and not self.web3: # self.web3 = w3 if w3 else Web3(HTTPProvider()) self.web3 = Web3(HTTPProvider()) def __del__(self): if self.session: self.session.close() def describe(self): return {} def set_sandbox_mode(self, enabled): if enabled: if 'test' in self.urls: self.urls['api_backup'] = self.urls['api'] self.urls['api'] = self.urls['test'] else: raise NotSupported(self.id + ' does not have a sandbox URL') elif 'api_backup' in self.urls: self.urls['api'] = self.urls['api_backup'] del self.urls['api_backup'] @classmethod def define_rest_api(cls, api, method_name, options={}): delimiters = re.compile('[^a-zA-Z0-9]') entry = getattr(cls, method_name) # returns a function (instead of a bound method) for api_type, methods in api.items(): for http_method, urls in methods.items(): for url in urls: url = url.strip() split_path = delimiters.split(url) uppercase_method = http_method.upper() lowercase_method = http_method.lower() camelcase_method = lowercase_method.capitalize() camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path]) lowercase_path = [x.strip().lower() for x in split_path] underscore_suffix = '_'.join([k for k in lowercase_path if len(k)]) camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix) underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower() if 'suffixes' in options: if 'camelcase' in options['suffixes']: camelcase += options['suffixes']['camelcase'] if 'underscore' in options['suffixes']: underscore += 
options['suffixes']['underscore'] def partialer(): outer_kwargs = {'path': url, 'api': api_type, 'method': uppercase_method} @functools.wraps(entry) def inner(_self, params=None): """ Inner is called when a generated method (publicGetX) is called. _self is a reference to self created by function.__get__(exchange, type(exchange)) https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial """ inner_kwargs = dict(outer_kwargs) # avoid mutation if params is not None: inner_kwargs['params'] = params return entry(_self, **inner_kwargs) return inner to_bind = partialer() setattr(cls, camelcase, to_bind) setattr(cls, underscore, to_bind) def raise_error(self, exception_type, url=None, method=None, error=None, details=None): if error: error = str(error) output = ' '.join([self.id] + [var for var in (url, method, error, details) if var is not None]) raise exception_type(output) def throttle(self): now = float(self.milliseconds()) elapsed = now - self.lastRestRequestTimestamp if elapsed < self.rateLimit: delay = self.rateLimit - elapsed time.sleep(delay / 1000.0) def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None): """A better wrapper over request for deferred signing""" if self.enableRateLimit: self.throttle() self.lastRestRequestTimestamp = self.milliseconds() request = self.sign(path, api, method, params, headers, body) return self.fetch(request['url'], request['method'], request['headers'], request['body']) def request(self, path, api='public', method='GET', params={}, headers=None, body=None): """Exchange.request is the entry point for all generated methods""" return self.fetch2(path, api, method, params, headers, body) @staticmethod def gzip_deflate(response, text): encoding = response.info().get('Content-Encoding') if encoding in ('gzip', 'x-gzip', 'deflate'): if encoding == 'deflate': return zlib.decompress(text, -zlib.MAX_WBITS) else: return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read() return text def find_broadly_matched_key(self, broad, string): """A helper method for matching error strings exactly vs broadly""" keys = list(broad.keys()) for i in range(0, len(keys)): key = keys[i] if string.find(key) >= 0: return key return None def handle_errors(self, code, reason, url, method, headers, body, response): pass def prepare_request_headers(self, headers=None): headers = headers or {} headers.update(self.headers) if self.userAgent: if type(self.userAgent) is str: headers.update({'User-Agent': self.userAgent}) elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent): headers.update(self.userAgent) if self.proxy: headers.update({'Origin': self.origin}) headers.update({'Accept-Encoding': 'gzip, deflate'}) return headers def fetch(self, url, method='GET', headers=None, body=None): """Perform a HTTP request and return decoded JSON data""" request_headers = self.prepare_request_headers(headers) url = self.proxy + url if self.verbose: print("\nRequest:", method, url, request_headers, body) self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body) if body: body = body.encode() self.session.cookies.clear() response = None http_response = None json_response = None try: response = self.session.request( method, url, data=body, headers=request_headers, timeout=int(self.timeout / 1000), proxies=self.proxies ) http_response = response.text json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None headers = response.headers # FIXME remove 
last_x_responses from subclasses if self.enableLastHttpResponse: self.last_http_response = http_response if self.enableLastJsonResponse: self.last_json_response = json_response if self.enableLastResponseHeaders: self.last_response_headers = headers if self.verbose: print("\nResponse:", method, url, response.status_code, headers, http_response) self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status_code, headers, http_response) response.raise_for_status() except Timeout as e: self.raise_error(RequestTimeout, method, url, e) except TooManyRedirects as e: self.raise_error(ExchangeError, url, method, e) except SSLError as e: self.raise_error(ExchangeError, url, method, e) except HTTPError as e: self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_errors(e, response.status_code, http_response, url, method) self.raise_error(ExchangeError, url, method, e, http_response) except RequestException as e: # base exception class error_string = str(e) if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string): self.raise_error(NetworkError, url, method, e) else: self.raise_error(ExchangeError, url, method, e) self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_response(http_response, json_response, url, method, headers, body) if json_response is not None: return json_response return http_response def handle_rest_errors(self, exception, http_status_code, response, url, method='GET'): error = None string_code = str(http_status_code) if string_code in self.httpExceptions: error = self.httpExceptions[string_code] if error == ExchangeNotAvailable: if re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE): error = DDoSProtection if error: self.raise_error(error, url, method, exception if exception else http_status_code, response) def handle_rest_response(self, response, json_response, url, method='GET', headers=None, body=None): if self.is_json_encoded_object(response) and json_response is None: ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE) exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE) if ddos_protection: self.raise_error(DDoSProtection, method, url, None, response) if exchange_not_available: message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect' self.raise_error(ExchangeNotAvailable, method, url, None, message) self.raise_error(ExchangeError, method, url, ValueError('failed to decode json'), response) def parse_json(self, http_response): try: if Exchange.is_json_encoded_object(http_response): return json.loads(http_response) except ValueError: # superclass of JsonDecodeError (python2) pass @staticmethod def safe_float(dictionary, key, default_value=None): value = default_value try: if isinstance(dictionary, list) and isinstance(key, int) and len(dictionary) > key: value = float(dictionary[key]) else: value = float(dictionary[key]) if (key is not None) and (key in dictionary) and (dictionary[key] is not None) else default_value except ValueError as e: value = default_value return value @staticmethod def safe_string(dictionary, key, default_value=None): return str(dictionary[key]) if key is not None and (key in dictionary) and dictionary[key] is not None else default_value 
@staticmethod def safe_integer(dictionary, key, default_value=None): if key is None or (key not in dictionary): return default_value value = dictionary[key] if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()): return int(value) return default_value @staticmethod def safe_value(dictionary, key, default_value=None): return dictionary[key] if key is not None and (key in dictionary) and dictionary[key] is not None else default_value # we're not using safe_floats with a list argument as we're trying to save some cycles here # we're not using safe_float_3 either because those cases are too rare to deserve their own optimization @staticmethod def safe_float_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value) @staticmethod def safe_string_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value) @staticmethod def safe_integer_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value) @staticmethod def safe_value_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value) @staticmethod def safe_either(method, dictionary, key1, key2, default_value=None): """A helper-wrapper for the safe_value_2() family.""" value = method(dictionary, key1) return value if value is not None else method(dictionary, key2, default_value) @staticmethod def truncate(num, precision=0): """Deprecated, use decimal_to_precision instead""" if precision > 0: decimal_precision = math.pow(10, precision) return math.trunc(num * decimal_precision) / decimal_precision return int(Exchange.truncate_to_string(num, precision)) @staticmethod def truncate_to_string(num, precision=0): """Deprecated, todo: remove references from subclasses""" if precision > 0: parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.') decimal_digits = parts[1][:precision].rstrip('0') decimal_digits = decimal_digits if len(decimal_digits) else '0' return parts[0] + '.' 
+ decimal_digits return ('%d' % num) @staticmethod def uuid(): return str(uuid.uuid4()) @staticmethod def capitalize(string): # first character only, rest characters unchanged # the native pythonic .capitalize() method lowercases all other characters # which is an unwanted behaviour, therefore we use this custom implementation # check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize()) if len(string) > 1: return "%s%s" % (string[0].upper(), string[1:]) return string.upper() @staticmethod def keysort(dictionary): return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0])) @staticmethod def extend(*args): if args is not None: result = None if type(args[0]) is collections.OrderedDict: result = collections.OrderedDict() else: result = {} for arg in args: result.update(arg) return result return {} @staticmethod def deep_extend(*args): result = None for arg in args: if isinstance(arg, dict): if not isinstance(result, dict): result = {} for key in arg: result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key]) else: result = arg return result @staticmethod def filter_by(array, key, value=None): if value: grouped = Exchange.group_by(array, key) if value in grouped: return grouped[value] return [] return array @staticmethod def filterBy(self, array, key, value=None): return Exchange.filter_by(array, key, value) @staticmethod def group_by(array, key): result = {} array = Exchange.to_array(array) array = [entry for entry in array if (key in entry) and (entry[key] is not None)] for entry in array: if entry[key] not in result: result[entry[key]] = [] result[entry[key]].append(entry) return result @staticmethod def groupBy(array, key): return Exchange.group_by(array, key) @staticmethod def index_by(array, key): result = {} if type(array) is dict: array = Exchange.keysort(array).values() for element in array: if (key in element) and (element[key] is not None): k = element[key] result[k] = element return result @staticmethod def sort_by(array, key, descending=False): return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending) @staticmethod def array_concat(a, b): return a + b @staticmethod def in_array(needle, haystack): return needle in haystack @staticmethod def is_empty(object): return not object @staticmethod def extract_params(string): return re.findall(r'{([\w-]+)}', string) @staticmethod def implode_params(string, params): for key in params: string = string.replace('{' + key + '}', str(params[key])) return string @staticmethod def url(path, params={}): result = Exchange.implode_params(path, params) query = Exchange.omit(params, Exchange.extract_params(path)) if query: result += '?' 
+ _urlencode.urlencode(query) return result @staticmethod def urlencode(params={}): if (type(params) is dict) or isinstance(params, collections.OrderedDict): return _urlencode.urlencode(params) return params @staticmethod def rawencode(params={}): return _urlencode.unquote(Exchange.urlencode(params)) @staticmethod def encode_uri_component(uri): return _urlencode.quote(uri, safe="~()*!.'") @staticmethod def omit(d, *args): result = d.copy() for arg in args: if type(arg) is list: for key in arg: if key in result: del result[key] else: if arg in result: del result[arg] return result @staticmethod def unique(array): return list(set(array)) @staticmethod def pluck(array, key): return [ element[key] for element in array if (key in element) and (element[key] is not None) ] @staticmethod def sum(*args): return sum([arg for arg in args if isinstance(arg, (float, int))]) @staticmethod def ordered(array): return collections.OrderedDict(array) @staticmethod def aggregate(bidasks): ordered = Exchange.ordered({}) for [price, volume] in bidasks: if volume > 0: ordered[price] = (ordered[price] if price in ordered else 0) + volume result = [] items = list(ordered.items()) for price, volume in items: result.append([price, volume]) return result @staticmethod def sec(): return Exchange.seconds() @staticmethod def msec(): return Exchange.milliseconds() @staticmethod def usec(): return Exchange.microseconds() @staticmethod def seconds(): return int(time.time()) @staticmethod def milliseconds(): return int(time.time() * 1000) @staticmethod def microseconds(): return int(time.time() * 1000000) @staticmethod def iso8601(timestamp=None): if timestamp is None: return timestamp if not isinstance(timestamp, (int, long)): return None if int(timestamp) < 0: return None try: utc = datetime.datetime.utcfromtimestamp(timestamp // 1000) return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z' except (TypeError, OverflowError, OSError): return None @staticmethod def dmy(timestamp, infix='-'): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y') @staticmethod def ymd(timestamp, infix='-'): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d') @staticmethod def ymdhms(timestamp, infix=' '): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S') @staticmethod def parse_date(timestamp=None): if timestamp is None: return timestamp if not isinstance(timestamp, str): return None if 'GMT' in timestamp: try: string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z' dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ") return calendar.timegm(dt.utctimetuple()) * 1000 except (TypeError, OverflowError, OSError): return None else: return Exchange.parse8601(timestamp) @staticmethod def parse8601(timestamp=None): if timestamp is None: return timestamp yyyy = '([0-9]{4})-?' mm = '([0-9]{2})-?' dd = '([0-9]{2})(?:T|[\\s])?' h = '([0-9]{2}):?' m = '([0-9]{2}):?' s = '([0-9]{2})' ms = '(\\.[0-9]{1,3})?' tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?' 
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz try: match = re.search(regex, timestamp, re.IGNORECASE) if match is None: return None yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups() ms = ms or '.000' msint = int(ms[1:]) sign = sign or '' sign = int(sign + '1') * -1 hours = int(hours or 0) * sign minutes = int(minutes or 0) * sign offset = datetime.timedelta(hours=hours, minutes=minutes) string = yyyy + mm + dd + h + m + s + ms + 'Z' dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ") dt = dt + offset return calendar.timegm(dt.utctimetuple()) * 1000 + msint except (TypeError, OverflowError, OSError, ValueError): return None @staticmethod def hash(request, algorithm='md5', digest='hex'): h = hashlib.new(algorithm, request) if digest == 'hex': return h.hexdigest() elif digest == 'base64': return base64.b64encode(h.digest()) return h.digest() @staticmethod def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'): h = hmac.new(secret, request, algorithm) if digest == 'hex': return h.hexdigest() elif digest == 'base64': return base64.b64encode(h.digest()) return h.digest() @staticmethod def binary_concat(*args): result = bytes() for arg in args: result = result + arg return result @staticmethod def binary_to_string(s): return s.decode('ascii') @staticmethod def base64urlencode(s): return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '') @staticmethod def jwt(request, secret, algorithm=hashlib.sha256, alg='HS256'): header = Exchange.encode(Exchange.json({ 'alg': alg, 'typ': 'JWT', })) encodedHeader = Exchange.base64urlencode(header) encodedData = Exchange.base64urlencode(Exchange.encode(Exchange.json(request))) token = encodedHeader + '.' + encodedData hmac = Exchange.hmac(Exchange.encode(token), Exchange.encode(secret), algorithm, 'binary') signature = Exchange.base64urlencode(hmac) return token + '.' 
+ signature @staticmethod def unjson(input): return json.loads(input) @staticmethod def json(data, params=None): return json.dumps(data, separators=(',', ':')) @staticmethod def is_json_encoded_object(input): return (isinstance(input, basestring) and (len(input) >= 2) and ((input[0] == '{') or (input[0] == '['))) @staticmethod def encode(string): return string.encode() @staticmethod def decode(string): return string.decode() @staticmethod def to_array(value): return list(value.values()) if type(value) is dict else value def nonce(self): return Exchange.seconds() def check_required_credentials(self, error=True): keys = list(self.requiredCredentials.keys()) for key in keys: if self.requiredCredentials[key] and not getattr(self, key): if error: self.raise_error(AuthenticationError, details='requires `' + key + '`') else: return error def check_address(self, address): """Checks an address is not the same character repeated or an empty sequence""" if address is None: self.raise_error(InvalidAddress, details='address is None') if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address: self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"') return address def account(self): return { 'free': 0.0, 'used': 0.0, 'total': 0.0, } def common_currency_code(self, currency): if not self.substituteCommonCurrencyCodes: return currency return self.safe_string(self.commonCurrencies, currency, currency) def currency_id(self, commonCode): if self.currencies: if commonCode in self.currencies: return self.currencies[commonCode]['id'] currencyIds = {v: k for k, v in self.commonCurrencies.items()} return self.safe_string(currencyIds, commonCode, commonCode) def precision_from_string(self, string): parts = re.sub(r'0+$', '', string).split('.') return len(parts[1]) if len(parts) > 1 else 0 def cost_to_precision(self, symbol, cost): return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode) def price_to_precision(self, symbol, price): return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode) def amount_to_precision(self, symbol, amount): return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode) def fee_to_precision(self, symbol, fee): return self.decimal_to_precision(fee, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode) def currency_to_precision(self, currency, fee): return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode) def set_markets(self, markets, currencies=None): values = list(markets.values()) if type(markets) is dict else markets for i in range(0, len(values)): values[i] = self.extend( self.fees['trading'], {'precision': self.precision, 'limits': self.limits}, values[i] ) self.markets = self.index_by(values, 'symbol') self.markets_by_id = self.index_by(values, 'id') self.marketsById = self.markets_by_id self.symbols = sorted(list(self.markets.keys())) self.ids = sorted(list(self.markets_by_id.keys())) if currencies: self.currencies = self.deep_extend(currencies, self.currencies) else: base_currencies = [{ 'id': market['baseId'] if 'baseId' in market else market['base'], 'numericId': market['baseNumericId'] if 'baseNumericId' in market else None, 'code': market['base'], 'precision': ( market['precision']['base'] if 'base' in 
market['precision'] else ( market['precision']['amount'] if 'amount' in market['precision'] else None ) ) if 'precision' in market else 8, } for market in values if 'base' in market] quote_currencies = [{ 'id': market['quoteId'] if 'quoteId' in market else market['quote'], 'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None, 'code': market['quote'], 'precision': ( market['precision']['quote'] if 'quote' in market['precision'] else ( market['precision']['price'] if 'price' in market['precision'] else None ) ) if 'precision' in market else 8, } for market in values if 'quote' in market] currencies = self.sort_by(base_currencies + quote_currencies, 'code') self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies) self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id') return self.markets def load_markets(self, reload=False, params={}): if not reload: if self.markets: if not self.markets_by_id: return self.set_markets(self.markets) return self.markets currencies = None if self.has['fetchCurrencies']: currencies = self.fetch_currencies() markets = self.fetch_markets(params) return self.set_markets(markets, currencies) def load_accounts(self, reload=False, params={}): if reload: self.accounts = self.fetch_accounts(params) else: if self.accounts: return self.accounts else: self.accounts = self.fetch_accounts(params) self.accountsById = self.index_by(self.accounts, 'id') return self.accounts def load_fees(self, reload=False): if not reload: if self.loaded_fees != Exchange.loaded_fees: return self.loaded_fees self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees()) return self.loaded_fees def fetch_markets(self, params={}): # markets are returned as a list # currencies are returned as a dict # this is for historical reasons # and may be changed for consistency later return self.to_array(self.markets) def fetch_currencies(self, params={}): # markets are returned as a list # currencies are returned as a dict # this is for historical reasons # and may be changed for consistency later return self.currencies def fetch_fees(self): trading = {} funding = {} if self.has['fetchTradingFees']: trading = self.fetch_trading_fees() if self.has['fetchFundingFees']: funding = self.fetch_funding_fees() return { 'trading': trading, 'funding': funding, } def create_order(self, symbol, type, side, amount, price=None, params={}): self.raise_error(NotSupported, details='create_order() not supported yet') def cancel_order(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='cancel_order() not supported yet') def fetch_bids_asks(self, symbols=None, params={}): self.raise_error(NotSupported, details='API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now') def fetch_tickers(self, symbols=None, params={}): self.raise_error(NotSupported, details='API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now') def fetch_order_status(self, id, symbol=None, params={}): order = self.fetch_order(id, symbol, params) return order['status'] def purge_cached_orders(self, before): orders = self.to_array(self.orders) orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)] self.orders = self.index_by(orders, 'id') return self.orders def fetch_order(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='fetch_order() is not supported yet') def fetch_orders(self, symbol=None, since=None, 
limit=None, params={}): self.raise_error(NotSupported, details='fetch_orders() is not supported yet') def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_open_orders() is not supported yet') def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_closed_orders() is not supported yet') def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_my_trades() is not supported yet') def fetch_order_trades(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='fetch_order_trades() is not supported yet') def fetch_transactions(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_transactions() is not supported yet') def fetch_deposits(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_deposits() is not supported yet') def fetch_withdrawals(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_withdrawals() is not supported yet') def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None): return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None): ohlcvs = self.to_array(ohlcvs) num_ohlcvs = len(ohlcvs) result = [] i = 0 while i < num_ohlcvs: if limit and (len(result) >= limit): break ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit) i = i + 1 if since and (ohlcv[0] < since): continue result.append(ohlcv) return self.sort_by(result, 0) def parse_bid_ask(self, bidask, price_key=0, amount_key=0): return [float(bidask[price_key]), float(bidask[amount_key])] def parse_bids_asks(self, bidasks, price_key=0, amount_key=1): result = [] if len(bidasks): if type(bidasks[0]) is list: for bidask in bidasks: if bidask[price_key] and bidask[amount_key]: result.append(self.parse_bid_ask(bidask, price_key, amount_key)) elif type(bidasks[0]) is dict: for bidask in bidasks: if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]): result.append(self.parse_bid_ask(bidask, price_key, amount_key)) else: self.raise_error(ExchangeError, details='unrecognized bidask format: ' + str(bidasks[0])) return result def fetch_l2_order_book(self, symbol, limit=None, params={}): orderbook = self.fetch_order_book(symbol, limit, params) return self.extend(orderbook, { 'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True), 'asks': self.sort_by(self.aggregate(orderbook['asks']), 0), }) def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1): return { 'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True), 'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0), 'timestamp': timestamp, 'datetime': self.iso8601(timestamp) if timestamp is not None else None, 'nonce': None, } def parse_balance(self, balance): currencies = self.omit(balance, 'info').keys() for account in ['free', 'used', 'total']: balance[account] = {} for currency in currencies: balance[account][currency] = balance[currency][account] return balance def 
fetch_partial_balance(self, part, params={}): balance = self.fetch_balance(params) return balance[part] def fetch_free_balance(self, params={}): return self.fetch_partial_balance('free', params) def fetch_used_balance(self, params={}): return self.fetch_partial_balance('used', params) def fetch_total_balance(self, params={}): return self.fetch_partial_balance('total', params) def fetch_trading_fees(self, symbol, params={}): self.raise_error(NotSupported, details='fetch_trading_fees() not supported yet') def fetch_trading_fee(self, symbol, params={}): if not self.has['fetchTradingFees']: self.raise_error(NotSupported, details='fetch_trading_fee() not supported yet') return self.fetch_trading_fees(params) def fetch_funding_fees(self, params={}): self.raise_error(NotSupported, details='fetch_funding_fees() not supported yet') def fetch_funding_fee(self, code, params={}): if not self.has['fetchFundingFees']: self.raise_error(NotSupported, details='fetch_funding_fee() not supported yet') return self.fetch_funding_fees(params) def load_trading_limits(self, symbols=None, reload=False, params={}): if self.has['fetchTradingLimits']: if reload or not('limitsLoaded' in list(self.options.keys())): response = self.fetch_trading_limits(symbols) for i in range(0, len(symbols)): symbol = symbols[i] self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol]) self.options['limitsLoaded'] = self.milliseconds() return self.markets def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): if not self.has['fetchTrades']: self.raise_error(NotSupported, details='fetch_ohlcv() not supported yet') self.load_markets() trades = self.fetch_trades(symbol, since, limit, params) return self.build_ohlcv(trades, timeframe, since, limit) def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}): return self.fetch_ohlcv(symbol, timeframe, since, limit, params) def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None): result = self.convert_trading_view_to_ohlcv(ohlcvs) return self.parse_ohlcvs(result, market, timeframe, since, limit) def convert_trading_view_to_ohlcv(self, ohlcvs): result = [] for i in range(0, len(ohlcvs['t'])): result.append([ ohlcvs['t'][i] * 1000, ohlcvs['o'][i], ohlcvs['h'][i], ohlcvs['l'][i], ohlcvs['c'][i], ohlcvs['v'][i], ]) return result def convert_ohlcv_to_trading_view(self, ohlcvs): result = { 't': [], 'o': [], 'h': [], 'l': [], 'c': [], 'v': [], } for i in range(0, len(ohlcvs)): result['t'].append(int(ohlcvs[i][0] / 1000)) result['o'].append(ohlcvs[i][1]) result['h'].append(ohlcvs[i][2]) result['l'].append(ohlcvs[i][3]) result['c'].append(ohlcvs[i][4]) result['v'].append(ohlcvs[i][5]) return result def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None): ms = self.parse_timeframe(timeframe) * 1000 ohlcvs = [] (high, low, close, volume) = (2, 3, 4, 5) num_trades = len(trades) oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit) for i in range(0, oldest): trade = trades[i] if (since is not None) and (trade['timestamp'] < since): continue opening_time = int(math.floor(trade['timestamp'] / ms) * ms) # Shift the edge of the m/h/d (but not M) j = len(ohlcvs) if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms: # moved to a new timeframe -> create a new candle from opening trade ohlcvs.append([ opening_time, trade['price'], trade['price'], trade['price'], trade['price'], trade['amount'], ]) else: # still processing the same timeframe -> update opening trade 
ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price']) ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price']) ohlcvs[j - 1][close] = trade['price'] ohlcvs[j - 1][volume] += trade['amount'] return ohlcvs @staticmethod def parse_timeframe(timeframe): amount = int(timeframe[0:-1]) unit = timeframe[-1] if 'y' in unit: scale = 60 * 60 * 24 * 365 elif 'M' in unit: scale = 60 * 60 * 24 * 30 elif 'w' in unit: scale = 60 * 60 * 24 * 7 elif 'd' in unit: scale = 60 * 60 * 24 elif 'h' in unit: scale = 60 * 60 else: scale = 60 # 1m by default return amount * scale def parse_trades(self, trades, market=None, since=None, limit=None): array = self.to_array(trades) array = [self.parse_trade(trade, market) for trade in array] array = self.sort_by(array, 'timestamp') symbol = market['symbol'] if market else None return self.filter_by_symbol_since_limit(array, symbol, since, limit) def parse_ledger(self, data, currency=None, since=None, limit=None): array = self.to_array(data) array = [self.parse_ledger_entry(item, currency) for item in array] array = self.sort_by(array, 'timestamp') code = currency['code'] if currency else None return self.filter_by_currency_since_limit(array, code, since, limit) def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}): array = self.to_array(transactions) array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array] array = self.sort_by(array, 'timestamp') code = currency['code'] if currency else None return self.filter_by_currency_since_limit(array, code, since, limit) def parse_orders(self, orders, market=None, since=None, limit=None): array = self.to_array(orders) array = [self.parse_order(order, market) for order in array] array = self.sort_by(array, 'timestamp') symbol = market['symbol'] if market else None return self.filter_by_symbol_since_limit(array, symbol, since, limit) def safe_currency_code(self, data, key, currency=None): code = None currency_id = self.safe_string(data, key) if currency_id in self.currencies_by_id: currency = self.currencies_by_id[currency_id] else: code = self.common_currency_code(currency_id) if currency is not None: code = currency['code'] return code def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None): array = self.to_array(array) if value: array = [entry for entry in array if entry[field] == value] if since: array = [entry for entry in array if entry['timestamp'] >= since] if limit: array = array[0:limit] return array def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None): return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit) def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None): return self.filter_by_value_since_limit(array, 'currency', code, since, limit) def filter_by_since_limit(self, array, since=None, limit=None): array = self.to_array(array) if since: array = [entry for entry in array if entry['timestamp'] >= since] if limit: array = array[0:limit] return array def filter_by_symbol(self, array, symbol=None): array = self.to_array(array) if symbol: return [entry for entry in array if entry['symbol'] == symbol] return array def filter_by_array(self, objects, key, values=None, indexed=True): objects = self.to_array(objects) # return all of them if no values were passed in if values is None: return self.index_by(objects, key) if indexed else objects result = [] for i in range(0, len(objects)): value = objects[i][key] if key in 
objects[i] else None if value in values: result.append(objects[i]) return self.index_by(result, key) if indexed else result def currency(self, code): if not self.currencies: self.raise_error(ExchangeError, details='Currencies not loaded') if isinstance(code, basestring) and (code in self.currencies): return self.currencies[code] self.raise_error(ExchangeError, details='Does not have currency code ' + str(code)) def find_market(self, string): if not self.markets: self.raise_error(ExchangeError, details='Markets not loaded') if isinstance(string, basestring): if string in self.markets_by_id: return self.markets_by_id[string] if string in self.markets: return self.markets[string] return string def find_symbol(self, string, market=None): if market is None: market = self.find_market(string) if isinstance(market, dict): return market['symbol'] return string def market(self, symbol): if not self.markets: self.raise_error(ExchangeError, details='Markets not loaded') if isinstance(symbol, basestring) and (symbol in self.markets): return self.markets[symbol] self.raise_error(ExchangeError, details='No market symbol ' + str(symbol)) def market_ids(self, symbols): return [self.market_id(symbol) for symbol in symbols] def market_id(self, symbol): market = self.market(symbol) return market['id'] if type(market) is dict else symbol def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}): market = self.markets[symbol] rate = market[takerOrMaker] cost = float(self.cost_to_precision(symbol, amount * price)) return { 'rate': rate, 'type': takerOrMaker, 'currency': market['quote'], 'cost': float(self.fee_to_precision(symbol, rate * cost)), } def edit_limit_buy_order(self, id, symbol, *args): return self.edit_limit_order(id, symbol, 'buy', *args) def edit_limit_sell_order(self, id, symbol, *args): return self.edit_limit_order(id, symbol, 'sell', *args) def edit_limit_order(self, id, symbol, *args): return self.edit_order(id, symbol, 'limit', *args) def edit_order(self, id, symbol, *args): if not self.enableRateLimit: self.raise_error(ExchangeError, details='edit_order() requires enableRateLimit = true') self.cancel_order(id, symbol) return self.create_order(symbol, *args) def create_limit_order(self, symbol, *args): return self.create_order(symbol, 'limit', *args) def create_market_order(self, symbol, *args): return self.create_order(symbol, 'market', *args) def create_limit_buy_order(self, symbol, *args): return self.create_order(symbol, 'limit', 'buy', *args) def create_limit_sell_order(self, symbol, *args): return self.create_order(symbol, 'limit', 'sell', *args) def create_market_buy_order(self, symbol, amount, params={}): return self.create_order(symbol, 'market', 'buy', amount, None, params) def create_market_sell_order(self, symbol, amount, params={}): return self.create_order(symbol, 'market', 'sell', amount, None, params) def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes') # ------------------------------------------------------------------------- # web3 / 0x methods @staticmethod def has_web3(): return Web3 is not None def check_required_dependencies(self): if not Exchange.has_web3(): raise NotSupported("Web3 functionality requires Python3 and web3 package installed: https://github.com/ethereum/web3.py") def eth_decimals(self, unit='ether'): units = { 'wei': 0, # 1 'kwei': 3, # 1000 'babbage': 3, # 1000 'femtoether': 3, # 1000 'mwei': 6, # 
1000000 'lovelace': 6, # 1000000 'picoether': 6, # 1000000 'gwei': 9, # 1000000000 'shannon': 9, # 1000000000 'nanoether': 9, # 1000000000 'nano': 9, # 1000000000 'szabo': 12, # 1000000000000 'microether': 12, # 1000000000000 'micro': 12, # 1000000000000 'finney': 15, # 1000000000000000 'milliether': 15, # 1000000000000000 'milli': 15, # 1000000000000000 'ether': 18, # 1000000000000000000 'kether': 21, # 1000000000000000000000 'grand': 21, # 1000000000000000000000 'mether': 24, # 1000000000000000000000000 'gether': 27, # 1000000000000000000000000000 'tether': 30, # 1000000000000000000000000000000 } return self.safe_value(units, unit) def eth_unit(self, decimals=18): units = { 0: 'wei', # 1000000000000000000 3: 'kwei', # 1000000000000000 6: 'mwei', # 1000000000000 9: 'gwei', # 1000000000 12: 'szabo', # 1000000 15: 'finney', # 1000 18: 'ether', # 1 21: 'kether', # 0.001 24: 'mether', # 0.000001 27: 'gether', # 0.000000001 30: 'tether', # 0.000000000001 } return self.safe_value(units, decimals) def fromWei(self, amount, unit='ether', decimals=18): if Web3 is None: self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org") if amount is None: return amount if decimals != 18: if decimals % 3: amount = int(amount) * (10 ** (18 - decimals)) else: unit = self.eth_unit(decimals) return float(Web3.fromWei(int(amount), unit)) def toWei(self, amount, unit='ether', decimals=18): if Web3 is None: self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org") if amount is None: return amount if decimals != 18: if decimals % 3: # this case has known yet unsolved problems: # toWei(1.999, 'ether', 17) == '199900000000000011' # toWei(1.999, 'ether', 19) == '19989999999999999991' # the best solution should not involve additional dependencies amount = Decimal(amount) / Decimal(10 ** (18 - decimals)) else: unit = self.eth_unit(decimals) return str(Web3.toWei(amount, unit)) def decryptAccountFromJSON(self, value, password): return self.decryptAccount(json.loads(value) if isinstance(value, basestring) else value, password) def decryptAccount(self, key, password): return self.web3.eth.accounts.decrypt(key, password) def decryptAccountFromPrivateKey(self, privateKey): return self.web3.eth.accounts.privateKeyToAccount(privateKey) def soliditySha3(self, array): values = self.solidityValues(array) types = self.solidityTypes(values) return self.web3.soliditySha3(types, values).hex() def soliditySha256(self, values): types = self.solidityTypes(values) solidity_values = self.solidityValues(values) encoded_values = [hex_encode_abi_type(abi_type, value)[2:] for abi_type, value in zip(types, solidity_values)] hex_string = '0x' + ''.join(encoded_values) return '0x' + self.hash(self.encode(self.web3.toText(hex_string)), 'sha256') def solidityTypes(self, array): return ['address' if self.web3.isAddress(value) else 'uint256' for value in array] def solidityValues(self, array): return [self.web3.toChecksumAddress(value) if self.web3.isAddress(value) else int(value) for value in array] def getZeroExOrderHash2(self, order): return self.soliditySha3([ order['exchangeContractAddress'], # address order['maker'], # address order['taker'], # address order['makerTokenAddress'], # address order['takerTokenAddress'], # address order['feeRecipient'], # address order['makerTokenAmount'], # uint256 order['takerTokenAmount'], # uint256 order['makerFee'], # uint256 order['takerFee'], # uint256 order['expirationUnixTimestampSec'], # uint256 order['salt'], # 
uint256 ]) def getZeroExOrderHash(self, order): unpacked = [ self.web3.toChecksumAddress(order['exchangeContractAddress']), # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['maker']), # { value: order.maker, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['taker']), # { value: order.taker, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['makerTokenAddress']), # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['takerTokenAddress']), # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['feeRecipient']), # { value: order.feeRecipient, type: types_1.SolidityTypes.Address }, int(order['makerTokenAmount']), # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, }, int(order['takerTokenAmount']), # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, }, int(order['makerFee']), # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, }, int(order['takerFee']), # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, }, int(order['expirationUnixTimestampSec']), # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, }, int(order['salt']), # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 }, ] types = [ 'address', # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address }, 'address', # { value: order.maker, type: types_1.SolidityTypes.Address }, 'address', # { value: order.taker, type: types_1.SolidityTypes.Address }, 'address', # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address }, 'address', # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address }, 'address', # { value: order.feeRecipient, type: types_1.SolidityTypes.Address }, 'uint256', # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 }, ] return self.web3.soliditySha3(types, unpacked).hex() def remove_0x_prefix(self, value): if value[:2] == '0x': return value[2:] return value def getZeroExOrderHashV2(self, order): # https://github.com/0xProject/0x-monorepo/blob/development/python-packages/order_utils/src/zero_ex/order_utils/__init__.py def pad_20_bytes_to_32(twenty_bytes): return bytes(12) + twenty_bytes def int_to_32_big_endian_bytes(i): return i.to_bytes(32, byteorder="big") def to_bytes(value): if not isinstance(value, str): raise TypeError("Value must be an instance of str") if len(value) % 2: value = "0x0" + self.remove_0x_prefix(value) return base64.b16decode(self.remove_0x_prefix(value), casefold=True) domain_struct_header = b"\x91\xab=\x17\xe3\xa5\n\x9d\x89\xe6?\xd3\x0b\x92\xbe\x7fS6\xb0;({\xb9Fxz\x83\xa9\xd6*'f\xf0\xf2F\x18\xf4\xc4\xbe\x1eb\xe0&\xfb\x03\x9a 
\xef\x96\xf4IR\x94\x81}\x10'\xff\xaam\x1fp\xe6\x1e\xad|[\xef\x02x\x16\xa8\x00\xda\x176DO\xb5\x8a\x80~\xf4\xc9`;xHg?~:h\xeb\x14\xa5" order_schema_hash = b'w\x05\x01\xf8\x8a&\xed\xe5\xc0J \xef\x87yi\xe9a\xeb\x11\xfc\x13\xb7\x8a\xafAKc=\xa0\xd4\xf8o' header = b"\x19\x01" domain_struct_hash = self.web3.sha3( domain_struct_header + pad_20_bytes_to_32(to_bytes(order["exchangeAddress"])) ) order_struct_hash = self.web3.sha3( order_schema_hash + pad_20_bytes_to_32(to_bytes(order["makerAddress"])) + pad_20_bytes_to_32(to_bytes(order["takerAddress"])) + pad_20_bytes_to_32(to_bytes(order["feeRecipientAddress"])) + pad_20_bytes_to_32(to_bytes(order["senderAddress"])) + int_to_32_big_endian_bytes(int(order["makerAssetAmount"])) + int_to_32_big_endian_bytes(int(order["takerAssetAmount"])) + int_to_32_big_endian_bytes(int(order["makerFee"])) + int_to_32_big_endian_bytes(int(order["takerFee"])) + int_to_32_big_endian_bytes(int(order["expirationTimeSeconds"])) + int_to_32_big_endian_bytes(int(order["salt"])) + self.web3.sha3(to_bytes(order["makerAssetData"])) + self.web3.sha3(to_bytes(order["takerAssetData"])) ) sha3 = self.web3.sha3( header + domain_struct_hash + order_struct_hash ) return '0x' + base64.b16encode(sha3).decode('ascii').lower() def signZeroExOrder(self, order, privateKey): orderHash = self.getZeroExOrderHash(order) signature = self.signMessage(orderHash[-64:], privateKey) return self.extend(order, { 'orderHash': orderHash, 'ecSignature': signature, # todo fix v if needed }) def signZeroExOrderV2(self, order, privateKey): orderHash = self.getZeroExOrderHashV2(order) signature = self.signMessage(orderHash[-64:], privateKey) return self.extend(order, { 'orderHash': orderHash, 'signature': self._convertECSignatureToSignatureHex(signature), }) def _convertECSignatureToSignatureHex(self, signature): # https://github.com/0xProject/0x-monorepo/blob/development/packages/order-utils/src/signature_utils.ts v = signature["v"] if v != 27 and v != 28: v = v + 27 return ( "0x" + self.remove_0x_prefix(hex(v)) + self.remove_0x_prefix(signature["r"]) + self.remove_0x_prefix(signature["s"]) + "03" ) def hashMessage(self, message): message_bytes = bytes.fromhex(message) return self.web3.sha3(b"\x19Ethereum Signed Message:\n" + str(len(message_bytes)).encode() + message_bytes).hex() def signHash(self, hash, privateKey): signature = self.web3.eth.account.signHash(hash[-64:], private_key=privateKey[-64:]) return { 'v': signature.v, # integer 'r': self.web3.toHex(signature.r), # '0x'-prefixed hex string 's': self.web3.toHex(signature.s), # '0x'-prefixed hex string } def signMessage(self, message, privateKey): # # The following comment is related to MetaMask, we use the upper type of signature prefix: # # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f', # '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', { # prefixType: 'ETH_SIGN', # shouldAddPrefixBeforeCallingEthSign: true # }).then ((e, r) => console.log (e,r)) # # { ↓ # v: 28, # r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2", # s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf" # } # # -------------------------------------------------------------------- # # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f', # '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', { # prefixType: 'NONE', # shouldAddPrefixBeforeCallingEthSign: true # }).then ((e, r) => console.log (e,r)) # # { ↓ # v: 27, # r: 
"0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6", # s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394" # } # message_hash = self.hashMessage(message) signature = self.signHash(message_hash[-64:], privateKey[-64:]) return signature def oath(self): if self.twofa is not None: return self.totp(self.twofa) else: raise ExchangeError(self.id + ' set .twofa to use this feature') @staticmethod def totp(key): def dec_to_bytes(n): if n > 0: return dec_to_bytes(n // 256) + bytes([n % 256]) else: return b'' def hex_to_dec(n): return int(n, base=16) def base32_to_bytes(n): missing_padding = len(n) % 8 padding = 8 - missing_padding if missing_padding > 0 else 0 padded = n.upper() + ('=' * padding) return base64.b32decode(padded) # throws an error if the key is invalid epoch = int(time.time()) // 30 hmac_res = Exchange.hmac(dec_to_bytes(epoch).rjust(8, b'\x00'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex') offset = hex_to_dec(hmac_res[-1]) * 2 otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff) return otp[-6:]
is_story_file
Checks if a file is a Rasa story file.

Args:
    file_path: Path of the file which should be checked.

Returns:
    `True` if it's a story file, otherwise `False`.
import logging import os import shutil import tempfile import uuid import re from typing import Tuple, List, Text, Set, Union, Optional, Iterable from rasa.nlu.training_data import loading from rasa.utils.io import DEFAULT_ENCODING logger = logging.getLogger(__name__) def get_core_directory(paths: Optional[Union[Text, List[Text]]],) -> Text: """Recursively collects all Core training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Path to temporary directory containing all found Core training files. """ core_files, _ = get_core_nlu_files(paths) return _copy_files_to_new_dir(core_files) def get_nlu_directory(paths: Optional[Union[Text, List[Text]]],) -> Text: """Recursively collects all NLU training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Path to temporary directory containing all found NLU training files. """ _, nlu_files = get_core_nlu_files(paths) return _copy_files_to_new_dir(nlu_files) def get_core_nlu_directories( paths: Optional[Union[Text, List[Text]]], ) -> Tuple[Text, Text]: """Recursively collects all training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Path to directory containing the Core files and path to directory containing the NLU training files. """ story_files, nlu_data_files = get_core_nlu_files(paths) story_directory = _copy_files_to_new_dir(story_files) nlu_directory = _copy_files_to_new_dir(nlu_data_files) return story_directory, nlu_directory def get_core_nlu_files( paths: Optional[Union[Text, List[Text]]] ) -> Tuple[List[Text], List[Text]]: """Recursively collects all training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Tuple of paths to story and NLU files. """ story_files = set() nlu_data_files = set() if paths is None: paths = [] elif isinstance(paths, str): paths = [paths] for path in set(paths): if not path: continue if _is_valid_filetype(path): if is_nlu_file(path): nlu_data_files.add(os.path.abspath(path)) elif is_story_file(path): story_files.add(os.path.abspath(path)) else: new_story_files, new_nlu_data_files = _find_core_nlu_files_in_directory( path ) story_files.update(new_story_files) nlu_data_files.update(new_nlu_data_files) return sorted(story_files), sorted(nlu_data_files) def _find_core_nlu_files_in_directory(directory: Text,) -> Tuple[Set[Text], Set[Text]]: story_files = set() nlu_data_files = set() for root, _, files in os.walk(directory, followlinks=True): # we sort the files here to ensure consistent order for repeatable training results for f in sorted(files): full_path = os.path.join(root, f) if not _is_valid_filetype(full_path): continue if is_nlu_file(full_path): nlu_data_files.add(full_path) elif is_story_file(full_path): story_files.add(full_path) return story_files, nlu_data_files def _is_valid_filetype(path: Text) -> bool: is_file = os.path.isfile(path) is_datafile = path.endswith(".json") or path.endswith(".md") return is_file and is_datafile def is_nlu_file(file_path: Text) -> bool: """Checks if a file is a Rasa compatible nlu file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a nlu file, otherwise `False`. 
""" return loading.guess_format(file_path) != loading.UNK # MASKED: is_story_file function (lines 140-167) def _contains_story_pattern(text: Text) -> bool: story_pattern = r".*##.+" return re.match(story_pattern, text) is not None def is_domain_file(file_path: Text) -> bool: """Checks whether the given file path is a Rasa domain file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a domain file, otherwise `False`. """ file_name = os.path.basename(file_path) return file_name in ["domain.yml", "domain.yaml"] def is_config_file(file_path: Text) -> bool: """Checks whether the given file path is a Rasa config file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a Rasa config file, otherwise `False`. """ file_name = os.path.basename(file_path) return file_name in ["config.yml", "config.yaml"] def _copy_files_to_new_dir(files: Iterable[Text]) -> Text: directory = tempfile.mkdtemp() for f in files: # makes sure files do not overwrite each other, hence the prefix unique_prefix = uuid.uuid4().hex unique_file_name = unique_prefix + "_" + os.path.basename(f) shutil.copy2(f, os.path.join(directory, unique_file_name)) return directory
def is_story_file(file_path: Text) -> bool:
    """Checks if a file is a Rasa story file.

    Args:
        file_path: Path of the file which should be checked.

    Returns:
        `True` if it's a story file, otherwise `False`.
    """

    if not file_path.endswith(".md"):
        return False

    try:
        with open(
            file_path, encoding=DEFAULT_ENCODING, errors="surrogateescape"
        ) as lines:
            return any(_contains_story_pattern(line) for line in lines)
    except Exception as e:
        # catch-all because we might be loading files we are not expecting to load
        logger.error(
            f"Tried to check if '{file_path}' is a story file, but failed to "
            f"read it. If this file contains story data, you should "
            f"investigate this error, otherwise it is probably best to "
            f"move the file to a different location. "
            f"Error: {e}"
        )
        return False
140
167
import logging import os import shutil import tempfile import uuid import re from typing import Tuple, List, Text, Set, Union, Optional, Iterable from rasa.nlu.training_data import loading from rasa.utils.io import DEFAULT_ENCODING logger = logging.getLogger(__name__) def get_core_directory(paths: Optional[Union[Text, List[Text]]],) -> Text: """Recursively collects all Core training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Path to temporary directory containing all found Core training files. """ core_files, _ = get_core_nlu_files(paths) return _copy_files_to_new_dir(core_files) def get_nlu_directory(paths: Optional[Union[Text, List[Text]]],) -> Text: """Recursively collects all NLU training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Path to temporary directory containing all found NLU training files. """ _, nlu_files = get_core_nlu_files(paths) return _copy_files_to_new_dir(nlu_files) def get_core_nlu_directories( paths: Optional[Union[Text, List[Text]]], ) -> Tuple[Text, Text]: """Recursively collects all training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Path to directory containing the Core files and path to directory containing the NLU training files. """ story_files, nlu_data_files = get_core_nlu_files(paths) story_directory = _copy_files_to_new_dir(story_files) nlu_directory = _copy_files_to_new_dir(nlu_data_files) return story_directory, nlu_directory def get_core_nlu_files( paths: Optional[Union[Text, List[Text]]] ) -> Tuple[List[Text], List[Text]]: """Recursively collects all training files from a list of paths. Args: paths: List of paths to training files or folders containing them. Returns: Tuple of paths to story and NLU files. """ story_files = set() nlu_data_files = set() if paths is None: paths = [] elif isinstance(paths, str): paths = [paths] for path in set(paths): if not path: continue if _is_valid_filetype(path): if is_nlu_file(path): nlu_data_files.add(os.path.abspath(path)) elif is_story_file(path): story_files.add(os.path.abspath(path)) else: new_story_files, new_nlu_data_files = _find_core_nlu_files_in_directory( path ) story_files.update(new_story_files) nlu_data_files.update(new_nlu_data_files) return sorted(story_files), sorted(nlu_data_files) def _find_core_nlu_files_in_directory(directory: Text,) -> Tuple[Set[Text], Set[Text]]: story_files = set() nlu_data_files = set() for root, _, files in os.walk(directory, followlinks=True): # we sort the files here to ensure consistent order for repeatable training results for f in sorted(files): full_path = os.path.join(root, f) if not _is_valid_filetype(full_path): continue if is_nlu_file(full_path): nlu_data_files.add(full_path) elif is_story_file(full_path): story_files.add(full_path) return story_files, nlu_data_files def _is_valid_filetype(path: Text) -> bool: is_file = os.path.isfile(path) is_datafile = path.endswith(".json") or path.endswith(".md") return is_file and is_datafile def is_nlu_file(file_path: Text) -> bool: """Checks if a file is a Rasa compatible nlu file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a nlu file, otherwise `False`. """ return loading.guess_format(file_path) != loading.UNK def is_story_file(file_path: Text) -> bool: """Checks if a file is a Rasa story file. Args: file_path: Path of the file which should be checked. 
Returns: `True` if it's a story file, otherwise `False`. """ if not file_path.endswith(".md"): return False try: with open( file_path, encoding=DEFAULT_ENCODING, errors="surrogateescape" ) as lines: return any(_contains_story_pattern(line) for line in lines) except Exception as e: # catch-all because we might be loading files we are not expecting to load logger.error( f"Tried to check if '{file_path}' is a story file, but failed to " f"read it. If this file contains story data, you should " f"investigate this error, otherwise it is probably best to " f"move the file to a different location. " f"Error: {e}" ) return False def _contains_story_pattern(text: Text) -> bool: story_pattern = r".*##.+" return re.match(story_pattern, text) is not None def is_domain_file(file_path: Text) -> bool: """Checks whether the given file path is a Rasa domain file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a domain file, otherwise `False`. """ file_name = os.path.basename(file_path) return file_name in ["domain.yml", "domain.yaml"] def is_config_file(file_path: Text) -> bool: """Checks whether the given file path is a Rasa config file. Args: file_path: Path of the file which should be checked. Returns: `True` if it's a Rasa config file, otherwise `False`. """ file_name = os.path.basename(file_path) return file_name in ["config.yml", "config.yaml"] def _copy_files_to_new_dir(files: Iterable[Text]) -> Text: directory = tempfile.mkdtemp() for f in files: # makes sure files do not overwrite each other, hence the prefix unique_prefix = uuid.uuid4().hex unique_file_name = unique_prefix + "_" + os.path.basename(f) shutil.copy2(f, os.path.join(directory, unique_file_name)) return directory
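A quick, self-contained usage sketch for is_story_file (paths and story text are made up for illustration):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    story_path = os.path.join(tmp, "stories.md")
    with open(story_path, "w") as f:
        f.write("## greet path\n* greet\n  - utter_greet\n")
    print(is_story_file(story_path))                     # True: .md file with a '##' story header
    print(is_story_file(os.path.join(tmp, "nlu.json")))  # False: rejected by the extension check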
predict
predict function

Args:
    model: keras model fit by fit_model
    X_test: Test features
    cate_cols: categorical columns list

Returns:
    y_pred
from typing import Tuple, Union import numpy as np import pandas as pd import tensorflow as tf from src.models.dnn_regressor_funcs import ( _compile_model, _create_keras_model, _fit_model, _to_input_list, ) # MASKED: predict function (lines 15-28) def train( X_train: pd.DataFrame, y_train: Union[pd.Series, np.array], X_val: pd.DataFrame, y_val: Union[pd.Series, np.array], layers: list, num_classes: int, cate_cols: list, learning_rate: float, epochs: int, batch_size: int, dropout_rate: float = 0.3, ) -> Tuple[tf.keras.callbacks.History, tf.keras.Model]: """ Training main function that takes dataset and parameters as input and returns the trained model with history Args: X_train: Train features y_train: train labels X_val: Validation labels y_val: validation labels layers: List of nodes in hidden layers num_classes: Number of classes in target variable cate_cols: categorical columns list learning_rate: learning rate epochs: number of epochs batch_size: batch size dropout_rate: dropout rate Returns: history of training, trained model """ X_train_list = _to_input_list(df=X_train, cate_cols=cate_cols) X_val_list = _to_input_list(df=X_val, cate_cols=cate_cols) # if len(y_train.shape) == 1: # y_train_categorical = tf.keras.utils.to_categorical( # y_train, num_classes=num_classes, dtype="float32" # ) # # y_val_categorical = tf.keras.utils.to_categorical( # y_val, num_classes=num_classes, dtype="float32" # ) y_train = np.array(y_train) y_val = np.array(y_val) model = _create_keras_model( X_train=X_train, layers=layers, num_classes=num_classes, dropout_rate=dropout_rate, cate_cols=cate_cols, ) _compile_model(model=model, num_classes=num_classes, learning_rate=learning_rate) history = _fit_model( model=model, X_train_list=X_train_list, y_train=y_train, X_val_list=X_val_list, y_val=y_val, epochs=epochs, batch_size=batch_size, ) return history, model
def predict(model: tf.keras.Model, X_test: pd.DataFrame, cate_cols: list) -> np.array:
    """
    predict function

    Args:
        model: keras model fit by fit_model
        X_test: Test features
        cate_cols: categorical columns list

    Returns:
        y_pred
    """
    X_test_list = _to_input_list(df=X_test, cate_cols=cate_cols)
    y_pred = model.predict(X_test_list)
    return y_pred
15
28
from typing import Tuple, Union import numpy as np import pandas as pd import tensorflow as tf from src.models.dnn_regressor_funcs import ( _compile_model, _create_keras_model, _fit_model, _to_input_list, ) def predict(model: tf.keras.Model, X_test: pd.DataFrame, cate_cols: list) -> np.array: """ predict function Args: model: keras model fit by fit_model X_test: Test features cate_cols: categorical columns list Returns: y_pred """ X_test_list = _to_input_list(df=X_test, cate_cols=cate_cols) y_pred = model.predict(X_test_list) return y_pred def train( X_train: pd.DataFrame, y_train: Union[pd.Series, np.array], X_val: pd.DataFrame, y_val: Union[pd.Series, np.array], layers: list, num_classes: int, cate_cols: list, learning_rate: float, epochs: int, batch_size: int, dropout_rate: float = 0.3, ) -> Tuple[tf.keras.callbacks.History, tf.keras.Model]: """ Training main function that takes dataset and parameters as input and returns the trained model with history Args: X_train: Train features y_train: train labels X_val: Validation labels y_val: validation labels layers: List of nodes in hidden layers num_classes: Number of classes in target variable cate_cols: categorical columns list learning_rate: learning rate epochs: number of epochs batch_size: batch size dropout_rate: dropout rate Returns: history of training, trained model """ X_train_list = _to_input_list(df=X_train, cate_cols=cate_cols) X_val_list = _to_input_list(df=X_val, cate_cols=cate_cols) # if len(y_train.shape) == 1: # y_train_categorical = tf.keras.utils.to_categorical( # y_train, num_classes=num_classes, dtype="float32" # ) # # y_val_categorical = tf.keras.utils.to_categorical( # y_val, num_classes=num_classes, dtype="float32" # ) y_train = np.array(y_train) y_val = np.array(y_val) model = _create_keras_model( X_train=X_train, layers=layers, num_classes=num_classes, dropout_rate=dropout_rate, cate_cols=cate_cols, ) _compile_model(model=model, num_classes=num_classes, learning_rate=learning_rate) history = _fit_model( model=model, X_train_list=X_train_list, y_train=y_train, X_val_list=X_val_list, y_val=y_val, epochs=epochs, batch_size=batch_size, ) return history, model
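A hedged usage sketch for predict; model is assumed to come from the train function below, and the column names are illustrative:

# X_test must carry the same feature columns the model was trained on;
# cate_cols names the categorical columns that _to_input_list splits out.
y_pred = predict(model, X_test, cate_cols=["shop_id", "item_id"])
print(y_pred.shape)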
train
Training main function that takes dataset and parameters as input and
returns the trained model with history

Args:
    X_train: Train features
    y_train: train labels
    X_val: Validation features
    y_val: validation labels
    layers: List of nodes in hidden layers
    num_classes: Number of classes in target variable
    cate_cols: categorical columns list
    learning_rate: learning rate
    epochs: number of epochs
    batch_size: batch size
    dropout_rate: dropout rate

Returns:
    history of training, trained model
from typing import Tuple, Union import numpy as np import pandas as pd import tensorflow as tf from src.models.dnn_regressor_funcs import ( _compile_model, _create_keras_model, _fit_model, _to_input_list, ) def predict(model: tf.keras.Model, X_test: pd.DataFrame, cate_cols: list) -> np.array: """ predict function Args: model: keras model fit by fit_model X_test: Test features cate_cols: categorical columns list Returns: y_pred """ X_test_list = _to_input_list(df=X_test, cate_cols=cate_cols) y_pred = model.predict(X_test_list) return y_pred # MASKED: train function (lines 31-97)
def train(
    X_train: pd.DataFrame,
    y_train: Union[pd.Series, np.array],
    X_val: pd.DataFrame,
    y_val: Union[pd.Series, np.array],
    layers: list,
    num_classes: int,
    cate_cols: list,
    learning_rate: float,
    epochs: int,
    batch_size: int,
    dropout_rate: float = 0.3,
) -> Tuple[tf.keras.callbacks.History, tf.keras.Model]:
    """
    Training main function that takes dataset and parameters as input and
    returns the trained model with history

    Args:
        X_train: Train features
        y_train: train labels
        X_val: Validation features
        y_val: validation labels
        layers: List of nodes in hidden layers
        num_classes: Number of classes in target variable
        cate_cols: categorical columns list
        learning_rate: learning rate
        epochs: number of epochs
        batch_size: batch size
        dropout_rate: dropout rate

    Returns:
        history of training, trained model
    """
    X_train_list = _to_input_list(df=X_train, cate_cols=cate_cols)
    X_val_list = _to_input_list(df=X_val, cate_cols=cate_cols)

    # if len(y_train.shape) == 1:
    #     y_train_categorical = tf.keras.utils.to_categorical(
    #         y_train, num_classes=num_classes, dtype="float32"
    #     )
    #
    #     y_val_categorical = tf.keras.utils.to_categorical(
    #         y_val, num_classes=num_classes, dtype="float32"
    #     )
    y_train = np.array(y_train)
    y_val = np.array(y_val)

    model = _create_keras_model(
        X_train=X_train,
        layers=layers,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
        cate_cols=cate_cols,
    )

    _compile_model(model=model, num_classes=num_classes, learning_rate=learning_rate)

    history = _fit_model(
        model=model,
        X_train_list=X_train_list,
        y_train=y_train,
        X_val_list=X_val_list,
        y_val=y_val,
        epochs=epochs,
        batch_size=batch_size,
    )

    return history, model
31
97
from typing import Tuple, Union import numpy as np import pandas as pd import tensorflow as tf from src.models.dnn_regressor_funcs import ( _compile_model, _create_keras_model, _fit_model, _to_input_list, ) def predict(model: tf.keras.Model, X_test: pd.DataFrame, cate_cols: list) -> np.array: """ predict function Args: model: keras model fit by fit_model X_test: Test features cate_cols: categorical columns list Returns: y_pred """ X_test_list = _to_input_list(df=X_test, cate_cols=cate_cols) y_pred = model.predict(X_test_list) return y_pred def train( X_train: pd.DataFrame, y_train: Union[pd.Series, np.array], X_val: pd.DataFrame, y_val: Union[pd.Series, np.array], layers: list, num_classes: int, cate_cols: list, learning_rate: float, epochs: int, batch_size: int, dropout_rate: float = 0.3, ) -> Tuple[tf.keras.callbacks.History, tf.keras.Model]: """ Training main function that takes dataset and parameters as input and returns the trained model with history Args: X_train: Train features y_train: train labels X_val: Validation labels y_val: validation labels layers: List of nodes in hidden layers num_classes: Number of classes in target variable cate_cols: categorical columns list learning_rate: learning rate epochs: number of epochs batch_size: batch size dropout_rate: dropout rate Returns: history of training, trained model """ X_train_list = _to_input_list(df=X_train, cate_cols=cate_cols) X_val_list = _to_input_list(df=X_val, cate_cols=cate_cols) # if len(y_train.shape) == 1: # y_train_categorical = tf.keras.utils.to_categorical( # y_train, num_classes=num_classes, dtype="float32" # ) # # y_val_categorical = tf.keras.utils.to_categorical( # y_val, num_classes=num_classes, dtype="float32" # ) y_train = np.array(y_train) y_val = np.array(y_val) model = _create_keras_model( X_train=X_train, layers=layers, num_classes=num_classes, dropout_rate=dropout_rate, cate_cols=cate_cols, ) _compile_model(model=model, num_classes=num_classes, learning_rate=learning_rate) history = _fit_model( model=model, X_train_list=X_train_list, y_train=y_train, X_val_list=X_val_list, y_val=y_val, epochs=epochs, batch_size=batch_size, ) return history, model
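An illustrative call to train, under the assumption that X_train/X_val are DataFrames with matching columns and that "shop_id"/"item_id" are the categorical ones:

history, model = train(
    X_train, y_train, X_val, y_val,
    layers=[128, 64],          # two hidden layers
    num_classes=1,             # single regression-style output
    cate_cols=["shop_id", "item_id"],
    learning_rate=1e-3,
    epochs=10,
    batch_size=256,
)
print(history.history["loss"][-1])  # final training loss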
contact_list
Displays a list of :model:`rr.Contact` linked to
:model:`rr.ServiceProvider`.

Includes a ModelForm for adding :model:`rr.Contact` to
:model:`rr.ServiceProvider`.

**Context**

``object_list``
    List of :model:`rr.Contact`.

``form``
    ModelForm for creating a :model:`rr.Contact`

``object``
    An instance of :model:`rr.ServiceProvider`.

**Template:**

:template:`rr/contact.html`
import logging from django.contrib import messages from django.contrib.auth.decorators import login_required from django.shortcuts import render from django.utils import timezone from django.utils.translation import ugettext as _ from rr.forms.contact import ContactForm from rr.models.contact import Contact from rr.utils.serviceprovider import get_service_provider logger = logging.getLogger(__name__) # MASKED: contact_list function (lines 16-50) def _add_contact(request, sp): form = ContactForm(request.POST, sp=sp) if form.is_valid(): contact_type = form.cleaned_data['type'] firstname = form.cleaned_data['firstname'] lastname = form.cleaned_data['lastname'] email = form.cleaned_data['email'] Contact.objects.create(sp=sp, type=contact_type, firstname=firstname, lastname=lastname, email=email) sp.save_modified() logger.info("Contact added for {sp} by {user}" .format(sp=sp, user=request.user)) messages.add_message(request, messages.INFO, _('Contact added.')) form = ContactForm(sp=sp) return form def _remove_contacts(request, sp): for key, value in request.POST.dict().items(): if value == "on": contact = Contact.objects.get(pk=key) if contact.sp == sp: contact.end_at = timezone.now() contact.save() sp.save_modified() logger.info("Contact removed from {sp} by {user}" .format(sp=sp, user=request.user)) messages.add_message(request, messages.INFO, _('Contact removed.'))
@login_required
def contact_list(request, pk):
    """
    Displays a list of :model:`rr.Contact` linked to
    :model:`rr.ServiceProvider`.

    Includes a ModelForm for adding :model:`rr.Contact` to
    :model:`rr.ServiceProvider`.

    **Context**

    ``object_list``
        List of :model:`rr.Contact`.

    ``form``
        ModelForm for creating a :model:`rr.Contact`

    ``object``
        An instance of :model:`rr.ServiceProvider`.

    **Template:**

    :template:`rr/contact.html`
    """
    sp = get_service_provider(pk, request.user)
    form = ContactForm(sp=sp)
    if request.method == "POST":
        if "add_contact" in request.POST:
            form = _add_contact(request, sp)
        elif "remove_contact" in request.POST:
            _remove_contacts(request, sp)
    contacts = Contact.objects.filter(sp=sp, end_at=None)
    return render(request, "rr/contact.html",
                  {'object_list': contacts,
                   'form': form,
                   'object': sp})
16
50
import logging from django.contrib import messages from django.contrib.auth.decorators import login_required from django.shortcuts import render from django.utils import timezone from django.utils.translation import ugettext as _ from rr.forms.contact import ContactForm from rr.models.contact import Contact from rr.utils.serviceprovider import get_service_provider logger = logging.getLogger(__name__) @login_required def contact_list(request, pk): """ Displays a list of :model:`rr.Contact` linked to :model:`rr.ServiceProvider`. Includes a ModelForm for adding :model:`rr.Contact` to :model:`rr.ServiceProvider`. **Context** ``object_list`` List of :model:`rr.Contact`. ``form`` ModelForm for creating a :model:`rr.Contact` ``object`` An instance of :model:`rr.ServiceProvider`. **Template:** :template:`rr/contact.html` """ sp = get_service_provider(pk, request.user) form = ContactForm(sp=sp) if request.method == "POST": if "add_contact" in request.POST: form = _add_contact(request, sp) elif "remove_contact" in request.POST: _remove_contacts(request, sp) contacts = Contact.objects.filter(sp=sp, end_at=None) return render(request, "rr/contact.html", {'object_list': contacts, 'form': form, 'object': sp}) def _add_contact(request, sp): form = ContactForm(request.POST, sp=sp) if form.is_valid(): contact_type = form.cleaned_data['type'] firstname = form.cleaned_data['firstname'] lastname = form.cleaned_data['lastname'] email = form.cleaned_data['email'] Contact.objects.create(sp=sp, type=contact_type, firstname=firstname, lastname=lastname, email=email) sp.save_modified() logger.info("Contact added for {sp} by {user}" .format(sp=sp, user=request.user)) messages.add_message(request, messages.INFO, _('Contact added.')) form = ContactForm(sp=sp) return form def _remove_contacts(request, sp): for key, value in request.POST.dict().items(): if value == "on": contact = Contact.objects.get(pk=key) if contact.sp == sp: contact.end_at = timezone.now() contact.save() sp.save_modified() logger.info("Contact removed from {sp} by {user}" .format(sp=sp, user=request.user)) messages.add_message(request, messages.INFO, _('Contact removed.'))
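A minimal sketch of wiring this view into a URLconf; the module path, route, and name are assumptions for illustration, not taken from the project:

# urls.py (illustrative)
from django.urls import path

from rr.views.contact import contact_list  # assumed module location

urlpatterns = [
    path("serviceproviders/<int:pk>/contacts/", contact_list, name="contact-list"),
]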
parametrize
Parametrizes the CirqOperation.

Args:
    *args (float): the parameters for the operations
# Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Cirq Operation class ==================== **Module name:** :mod:`pennylane_cirq.cirq_operation` .. currentmodule:: pennylane_cirq.cirq_operation An helper class that wraps the native Cirq operations and provides an interface for PennyLane. Classes ------- .. autosummary:: CirqOperation Code details ~~~~~~~~~~~~ """ from collections.abc import Sequence import cirq import pennylane as qml class CirqOperation: """A helper class that wraps the native Cirq operations and provides an interface for parametrization and application.""" def __init__(self, parametrization): """Initializes the CirqOperation Args: parametrization (Tuple[float] -> Union[Cirq:Qid, List[Cirq:Qid]]): Converts the PennyLane gate parameters to an ordered list of gates that are to be applied. """ self.parametrization = parametrization self.parametrized_cirq_gates = None self.is_inverse = False # MASKED: parametrize function (lines 54-67) def apply(self, *qubits): """Applies the CirqOperation. Args: *qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed. """ if not self.parametrized_cirq_gates: raise qml.DeviceError("CirqOperation must be parametrized before it can be applied.") return (parametrized_gate(*qubits) for parametrized_gate in self.parametrized_cirq_gates) def inv(self): """Inverses the CirqOperation.""" # We can also support inversion after parametrization, but this is not necessary for the # PennyLane-Cirq codebase at the moment. if self.parametrized_cirq_gates: raise qml.DeviceError("CirqOperation can't be inverted after it was parametrized.") self.is_inverse = not self.is_inverse
def parametrize(self, *args):
    """Parametrizes the CirqOperation.

    Args:
        *args (float): the parameters for the operations
    """
    self.parametrized_cirq_gates = self.parametrization(*args)

    if not isinstance(self.parametrized_cirq_gates, Sequence):
        self.parametrized_cirq_gates = [self.parametrized_cirq_gates]

    if self.is_inverse:
        # Cirq automatically reverses the order if it gets an iterable
        self.parametrized_cirq_gates = cirq.inverse(self.parametrized_cirq_gates)
54
67
# Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Cirq Operation class ==================== **Module name:** :mod:`pennylane_cirq.cirq_operation` .. currentmodule:: pennylane_cirq.cirq_operation An helper class that wraps the native Cirq operations and provides an interface for PennyLane. Classes ------- .. autosummary:: CirqOperation Code details ~~~~~~~~~~~~ """ from collections.abc import Sequence import cirq import pennylane as qml class CirqOperation: """A helper class that wraps the native Cirq operations and provides an interface for parametrization and application.""" def __init__(self, parametrization): """Initializes the CirqOperation Args: parametrization (Tuple[float] -> Union[Cirq:Qid, List[Cirq:Qid]]): Converts the PennyLane gate parameters to an ordered list of gates that are to be applied. """ self.parametrization = parametrization self.parametrized_cirq_gates = None self.is_inverse = False def parametrize(self, *args): """Parametrizes the CirqOperation. Args: *args (float): the parameters for the operations """ self.parametrized_cirq_gates = self.parametrization(*args) if not isinstance(self.parametrized_cirq_gates, Sequence): self.parametrized_cirq_gates = [self.parametrized_cirq_gates] if self.is_inverse: # Cirq automatically reverses the order if it gets an iterable self.parametrized_cirq_gates = cirq.inverse(self.parametrized_cirq_gates) def apply(self, *qubits): """Applies the CirqOperation. Args: *qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed. """ if not self.parametrized_cirq_gates: raise qml.DeviceError("CirqOperation must be parametrized before it can be applied.") return (parametrized_gate(*qubits) for parametrized_gate in self.parametrized_cirq_gates) def inv(self): """Inverses the CirqOperation.""" # We can also support inversion after parametrization, but this is not necessary for the # PennyLane-Cirq codebase at the moment. if self.parametrized_cirq_gates: raise qml.DeviceError("CirqOperation can't be inverted after it was parametrized.") self.is_inverse = not self.is_inverse
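A short sketch of parametrize in isolation; the rotation gate is an arbitrary choice for illustration:

import cirq

op = CirqOperation(lambda phi: cirq.rx(phi))  # parametrization: gate args -> Cirq gate(s)
op.parametrize(0.5)
print(op.parametrized_cirq_gates)  # a single returned gate gets wrapped into a list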
apply
Applies the CirqOperation.

Args:
    *qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed.
# Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Cirq Operation class ==================== **Module name:** :mod:`pennylane_cirq.cirq_operation` .. currentmodule:: pennylane_cirq.cirq_operation An helper class that wraps the native Cirq operations and provides an interface for PennyLane. Classes ------- .. autosummary:: CirqOperation Code details ~~~~~~~~~~~~ """ from collections.abc import Sequence import cirq import pennylane as qml class CirqOperation: """A helper class that wraps the native Cirq operations and provides an interface for parametrization and application.""" def __init__(self, parametrization): """Initializes the CirqOperation Args: parametrization (Tuple[float] -> Union[Cirq:Qid, List[Cirq:Qid]]): Converts the PennyLane gate parameters to an ordered list of gates that are to be applied. """ self.parametrization = parametrization self.parametrized_cirq_gates = None self.is_inverse = False def parametrize(self, *args): """Parametrizes the CirqOperation. Args: *args (float): the parameters for the operations """ self.parametrized_cirq_gates = self.parametrization(*args) if not isinstance(self.parametrized_cirq_gates, Sequence): self.parametrized_cirq_gates = [self.parametrized_cirq_gates] if self.is_inverse: # Cirq automatically reverses the order if it gets an iterable self.parametrized_cirq_gates = cirq.inverse(self.parametrized_cirq_gates) # MASKED: apply function (lines 69-78) def inv(self): """Inverses the CirqOperation.""" # We can also support inversion after parametrization, but this is not necessary for the # PennyLane-Cirq codebase at the moment. if self.parametrized_cirq_gates: raise qml.DeviceError("CirqOperation can't be inverted after it was parametrized.") self.is_inverse = not self.is_inverse
def apply(self, *qubits):
    """Applies the CirqOperation.

    Args:
        *qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed.
    """
    if not self.parametrized_cirq_gates:
        raise qml.DeviceError("CirqOperation must be parametrized before it can be applied.")

    return (parametrized_gate(*qubits) for parametrized_gate in self.parametrized_cirq_gates)
69
78
# Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Cirq Operation class ==================== **Module name:** :mod:`pennylane_cirq.cirq_operation` .. currentmodule:: pennylane_cirq.cirq_operation An helper class that wraps the native Cirq operations and provides an interface for PennyLane. Classes ------- .. autosummary:: CirqOperation Code details ~~~~~~~~~~~~ """ from collections.abc import Sequence import cirq import pennylane as qml class CirqOperation: """A helper class that wraps the native Cirq operations and provides an interface for parametrization and application.""" def __init__(self, parametrization): """Initializes the CirqOperation Args: parametrization (Tuple[float] -> Union[Cirq:Qid, List[Cirq:Qid]]): Converts the PennyLane gate parameters to an ordered list of gates that are to be applied. """ self.parametrization = parametrization self.parametrized_cirq_gates = None self.is_inverse = False def parametrize(self, *args): """Parametrizes the CirqOperation. Args: *args (float): the parameters for the operations """ self.parametrized_cirq_gates = self.parametrization(*args) if not isinstance(self.parametrized_cirq_gates, Sequence): self.parametrized_cirq_gates = [self.parametrized_cirq_gates] if self.is_inverse: # Cirq automatically reverses the order if it gets an iterable self.parametrized_cirq_gates = cirq.inverse(self.parametrized_cirq_gates) def apply(self, *qubits): """Applies the CirqOperation. Args: *qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed. """ if not self.parametrized_cirq_gates: raise qml.DeviceError("CirqOperation must be parametrized before it can be applied.") return (parametrized_gate(*qubits) for parametrized_gate in self.parametrized_cirq_gates) def inv(self): """Inverses the CirqOperation.""" # We can also support inversion after parametrization, but this is not necessary for the # PennyLane-Cirq codebase at the moment. if self.parametrized_cirq_gates: raise qml.DeviceError("CirqOperation can't be inverted after it was parametrized.") self.is_inverse = not self.is_inverse
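Continuing that sketch, apply binds the parametrized gates to concrete qubits, yielding operations a circuit can consume:

import cirq

op = CirqOperation(lambda phi: cirq.rx(phi))
op.parametrize(0.25)
qubit = cirq.LineQubit(0)
circuit = cirq.Circuit(op.apply(qubit))  # apply returns a generator of gate-on-qubit operations
print(circuit)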
save
Saves the current module dictionary.

Args:
    filename (str): name of output file
import os import urllib import torch from torch.utils import model_zoo class CheckpointIO(object): ''' CheckpointIO class. It handles saving and loading checkpoints. Args: checkpoint_dir (str): path where checkpoints are saved ''' def __init__(self, checkpoint_dir='./chkpts', **kwargs): self.module_dict = kwargs self.checkpoint_dir = checkpoint_dir if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) def register_modules(self, **kwargs): ''' Registers modules in current module dictionary. ''' self.module_dict.update(kwargs) # MASKED: save function (lines 27-39) def load(self, filename): '''Loads a module dictionary from local file or url. Args: filename (str): name of saved module dictionary ''' if is_url(filename): return self.load_url(filename) else: return self.load_file(filename) def load_file(self, filename): '''Loads a module dictionary from file. Args: filename (str): name of saved module dictionary ''' if not os.path.isabs(filename): filename = os.path.join(self.checkpoint_dir, filename) if os.path.exists(filename): print(filename) print('=> Loading checkpoint from local file...') state_dict = torch.load(filename) scalars = self.parse_state_dict(state_dict) return scalars else: raise FileNotFoundError def load_url(self, url): '''Load a module dictionary from url. Args: url (str): url to saved model ''' print(url) print('=> Loading checkpoint from url...') state_dict = model_zoo.load_url(url, progress=True) scalars = self.parse_state_dict(state_dict) return scalars def parse_state_dict(self, state_dict): '''Parse state_dict of model and return scalars. Args: state_dict (dict): State dict of model ''' for k, v in self.module_dict.items(): if k in state_dict: v.load_state_dict(state_dict[k]) else: print('Warning: Could not find %s in checkpoint!' % k) scalars = {k: v for k, v in state_dict.items() if k not in self.module_dict} return scalars def is_url(url): scheme = urllib.parse.urlparse(url).scheme return scheme in ('http', 'https')
def save(self, filename, **kwargs):
    ''' Saves the current module dictionary.

    Args:
        filename (str): name of output file
    '''
    if not os.path.isabs(filename):
        filename = os.path.join(self.checkpoint_dir, filename)

    outdict = kwargs
    for k, v in self.module_dict.items():
        outdict[k] = v.state_dict()
    torch.save(outdict, filename)
27
39
import os import urllib import torch from torch.utils import model_zoo class CheckpointIO(object): ''' CheckpointIO class. It handles saving and loading checkpoints. Args: checkpoint_dir (str): path where checkpoints are saved ''' def __init__(self, checkpoint_dir='./chkpts', **kwargs): self.module_dict = kwargs self.checkpoint_dir = checkpoint_dir if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) def register_modules(self, **kwargs): ''' Registers modules in current module dictionary. ''' self.module_dict.update(kwargs) def save(self, filename, **kwargs): ''' Saves the current module dictionary. Args: filename (str): name of output file ''' if not os.path.isabs(filename): filename = os.path.join(self.checkpoint_dir, filename) outdict = kwargs for k, v in self.module_dict.items(): outdict[k] = v.state_dict() torch.save(outdict, filename) def load(self, filename): '''Loads a module dictionary from local file or url. Args: filename (str): name of saved module dictionary ''' if is_url(filename): return self.load_url(filename) else: return self.load_file(filename) def load_file(self, filename): '''Loads a module dictionary from file. Args: filename (str): name of saved module dictionary ''' if not os.path.isabs(filename): filename = os.path.join(self.checkpoint_dir, filename) if os.path.exists(filename): print(filename) print('=> Loading checkpoint from local file...') state_dict = torch.load(filename) scalars = self.parse_state_dict(state_dict) return scalars else: raise FileNotFoundError def load_url(self, url): '''Load a module dictionary from url. Args: url (str): url to saved model ''' print(url) print('=> Loading checkpoint from url...') state_dict = model_zoo.load_url(url, progress=True) scalars = self.parse_state_dict(state_dict) return scalars def parse_state_dict(self, state_dict): '''Parse state_dict of model and return scalars. Args: state_dict (dict): State dict of model ''' for k, v in self.module_dict.items(): if k in state_dict: v.load_state_dict(state_dict[k]) else: print('Warning: Could not find %s in checkpoint!' % k) scalars = {k: v for k, v in state_dict.items() if k not in self.module_dict} return scalars def is_url(url): scheme = urllib.parse.urlparse(url).scheme return scheme in ('http', 'https')
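A usage sketch of the checkpoint helper with a toy torch model (the objects are illustrative):

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)

ckpt = CheckpointIO("./chkpts", model=model, optimizer=optimizer)
ckpt.save("model.pt", epoch=3)  # extra kwargs like epoch are stored as plain scalars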
parse_state_dict
Parse state_dict of model and return scalars.

Args:
    state_dict (dict): State dict of model
import os import urllib import torch from torch.utils import model_zoo class CheckpointIO(object): ''' CheckpointIO class. It handles saving and loading checkpoints. Args: checkpoint_dir (str): path where checkpoints are saved ''' def __init__(self, checkpoint_dir='./chkpts', **kwargs): self.module_dict = kwargs self.checkpoint_dir = checkpoint_dir if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) def register_modules(self, **kwargs): ''' Registers modules in current module dictionary. ''' self.module_dict.update(kwargs) def save(self, filename, **kwargs): ''' Saves the current module dictionary. Args: filename (str): name of output file ''' if not os.path.isabs(filename): filename = os.path.join(self.checkpoint_dir, filename) outdict = kwargs for k, v in self.module_dict.items(): outdict[k] = v.state_dict() torch.save(outdict, filename) def load(self, filename): '''Loads a module dictionary from local file or url. Args: filename (str): name of saved module dictionary ''' if is_url(filename): return self.load_url(filename) else: return self.load_file(filename) def load_file(self, filename): '''Loads a module dictionary from file. Args: filename (str): name of saved module dictionary ''' if not os.path.isabs(filename): filename = os.path.join(self.checkpoint_dir, filename) if os.path.exists(filename): print(filename) print('=> Loading checkpoint from local file...') state_dict = torch.load(filename) scalars = self.parse_state_dict(state_dict) return scalars else: raise FileNotFoundError def load_url(self, url): '''Load a module dictionary from url. Args: url (str): url to saved model ''' print(url) print('=> Loading checkpoint from url...') state_dict = model_zoo.load_url(url, progress=True) scalars = self.parse_state_dict(state_dict) return scalars # MASKED: parse_state_dict function (lines 83-97) def is_url(url): scheme = urllib.parse.urlparse(url).scheme return scheme in ('http', 'https')
def parse_state_dict(self, state_dict):
    '''Parse state_dict of model and return scalars.

    Args:
        state_dict (dict): State dict of model
    '''
    for k, v in self.module_dict.items():
        if k in state_dict:
            v.load_state_dict(state_dict[k])
        else:
            print('Warning: Could not find %s in checkpoint!' % k)
    scalars = {k: v for k, v in state_dict.items()
               if k not in self.module_dict}
    return scalars
83
97
import os import urllib import torch from torch.utils import model_zoo class CheckpointIO(object): ''' CheckpointIO class. It handles saving and loading checkpoints. Args: checkpoint_dir (str): path where checkpoints are saved ''' def __init__(self, checkpoint_dir='./chkpts', **kwargs): self.module_dict = kwargs self.checkpoint_dir = checkpoint_dir if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) def register_modules(self, **kwargs): ''' Registers modules in current module dictionary. ''' self.module_dict.update(kwargs) def save(self, filename, **kwargs): ''' Saves the current module dictionary. Args: filename (str): name of output file ''' if not os.path.isabs(filename): filename = os.path.join(self.checkpoint_dir, filename) outdict = kwargs for k, v in self.module_dict.items(): outdict[k] = v.state_dict() torch.save(outdict, filename) def load(self, filename): '''Loads a module dictionary from local file or url. Args: filename (str): name of saved module dictionary ''' if is_url(filename): return self.load_url(filename) else: return self.load_file(filename) def load_file(self, filename): '''Loads a module dictionary from file. Args: filename (str): name of saved module dictionary ''' if not os.path.isabs(filename): filename = os.path.join(self.checkpoint_dir, filename) if os.path.exists(filename): print(filename) print('=> Loading checkpoint from local file...') state_dict = torch.load(filename) scalars = self.parse_state_dict(state_dict) return scalars else: raise FileNotFoundError def load_url(self, url): '''Load a module dictionary from url. Args: url (str): url to saved model ''' print(url) print('=> Loading checkpoint from url...') state_dict = model_zoo.load_url(url, progress=True) scalars = self.parse_state_dict(state_dict) return scalars def parse_state_dict(self, state_dict): '''Parse state_dict of model and return scalars. Args: state_dict (dict): State dict of model ''' for k, v in self.module_dict.items(): if k in state_dict: v.load_state_dict(state_dict[k]) else: print('Warning: Could not find %s in checkpoint!' % k) scalars = {k: v for k, v in state_dict.items() if k not in self.module_dict} return scalars def is_url(url): scheme = urllib.parse.urlparse(url).scheme return scheme in ('http', 'https')
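Loading back with the same helper then restores the registered modules in place and hands back whatever scalars rode along:

scalars = ckpt.load("model.pt")  # ckpt from the sketch above
print(scalars)                   # {'epoch': 3}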
get_extra_rules
Helper to provide custom (project-level/user-level) anonymization rules
as a mapping of tags -> action function.

Args:
    use_extra (bool): Whether to use extra rules.
    extra_json_path (Path_Str): Path to the extra-rules json file. It should be
        flat json with an action as the key and a list of tags as the value.

Returns:
    Optional[ActionsDict]: extra rules mapping (tags -> action function)
""" This module is intended to extend functionality of the code provided by original authors. The process is as follows: 1. User has to provide source root path containing (possibly nested) folders with dicom files 2. The program will recreate the structure in the destination root path and anonymize all dicom files. """ import argparse import json import logging import logging.config import random from pathlib import Path from typing import Optional import pydicom from dicomanonymizer.anonym_state import AnonState from dicomanonymizer.dicom_utils import fix_exposure from dicomanonymizer.simpledicomanonymizer import ( anonymize_dicom_file, initialize_actions, ) from dicomanonymizer.utils import ( LOGS_PATH, PROJ_ROOT, ActionsDict, Path_Str, get_dirs, to_Path, try_valid_dir, ) # setup logging (create dirs, if it is first time) LOGS_PATH.mkdir(parents=True, exist_ok=True) logging.config.fileConfig( PROJ_ROOT / "dicomanonymizer/config/logging.ini", defaults={"logfilename": (LOGS_PATH / "file.log").as_posix()}, disable_existing_loggers=False, ) logger = logging.getLogger(__name__) _STATE_PATH = Path.home() / ".dicomanonymizer/cache" _STATE_PATH.mkdir(parents=True, exist_ok=True) # MASKED: get_extra_rules function (lines 47-73) def anonymize_dicom_folder( in_path: Path_Str, out_path: Path_Str, debug: bool = False, **kwargs ): """Anonymize dicom files in `in_path`, if `in_path` doesn't contain dicom files, will do nothing. Debug == True will do sort of dry run to check if all good for the large data storages Args: in_path (Path_Str): path to the folder containing dicom files out_path (Path_Str): path to the folder there anonymized copies will be saved debuf (bool): if true, will do a "dry" run """ # check and prepare in_path = to_Path(in_path) try_valid_dir(in_path) out_path = to_Path(out_path) out_path.mkdir(parents=True, exist_ok=True) logger.info(f"Processing: {in_path}") # work itself in_files = [p for p in in_path.iterdir() if p.is_file()] if not in_files: logger.info(f"Folder {in_path} doesn't have dicom files, skip.") return if debug: # anonymize just one file f_in = random.choice(in_files) f_out = out_path / f_in.name try: anonymize_dicom_file(f_in, f_out) except Exception as e: logger.info(f_in) logger.exception(e) raise e else: for f_in in in_files: f_out = out_path / f_in.name try: anonymize_dicom_file(f_in, f_out, **kwargs) except Exception as e: logger.info(f_in) logger.exception(e) raise e def anonymize_root_folder( in_root: Path_Str, out_root: Path_Str, **kwargs, ): """The fuction will get all nested folders from `in_root` and perform anonymization of all folders containg dicom-files Will recreate the `in_root` folders structure in the `out_root` Args: in_root (Path_Str): source root folder (presumably has some dicom-files inide, maybe nested) out_root (Path_Str): destination root folder, will create if not exists """ in_root = to_Path(in_root) try_valid_dir(in_root) out_root = to_Path(out_root) out_root.mkdir(parents=True, exist_ok=True) in_dirs = get_dirs(in_root) state = AnonState(_STATE_PATH) state.init_state() state.load_state() def get_tags_callback(dataset: pydicom.Dataset): state.tag_counter.update(dataset.dir()) logger.info( "Processed paths will be added to the cache, if cache exist and has some paths included, they will be skipped" ) logger.info( f"if, you need to process data again delete files {_STATE_PATH}, please" ) # will try to process all folders, if exception will dump state before raising try: for in_d in in_dirs: rel_path = in_d.relative_to(in_root) if 
str(rel_path) in state.visited_folders: logger.info(f"{in_d} path is in cache, skipping") continue else: out_d = out_root / rel_path anonymize_dicom_folder( in_d, out_d, ds_callback=get_tags_callback, **kwargs ) # update state state.visited_folders[str(rel_path)] = True except Exception as e: raise e finally: # before saving updated state let's flag tags not seen previously prev_state = AnonState(_STATE_PATH) prev_state.init_state() prev_state.load_state() new_tags = set(state.tag_counter.keys()).difference( prev_state.tag_counter.keys() ) if new_tags: logger.warning( f"During the anonymization new tags: {new_tags} were present" ) else: logger.info("No new tags werer present") # now we can save the current state state.save_state() # Add CLI args parser = argparse.ArgumentParser(description="Batch dicom-anonymization CLI") parser.add_argument( "--type", type=str, choices=["batch", "folder"], default="batch", help="Process only one folder - folder or all nested folders - batch, default = batch", ) parser.add_argument( "--extra-rules", default="", help="Path to json file defining extra rules for additional tags. Defalult in project.", ) parser.add_argument( "--no-extra", action="store_true", help="Only use a rules from DICOM-standard basic de-id profile", ) parser.add_argument( "--debug", action="store_true", help="Will do a dry run (one file per folder)" ) parser.add_argument( "src", type=str, help="Absolute path to the folder containing dicom-files or nested folders with dicom-files", ) parser.add_argument( "dst", type=str, help="Absolute path to the folder where to save anonymized copy of src", ) def main(): # parse args args = parser.parse_args() in_path = Path(args.src) out_path = Path(args.dst) debug = args.debug path = args.extra_rules if not path: path = PROJ_ROOT / "dicomanonymizer/resources/extra_rules.json" extra_rules = get_extra_rules(use_extra=not args.no_extra, extra_json_path=path) # fix known issue with dicom fix_exposure() msg = f""" Start a job: {args.type}, debug set to {args.debug} Will anonymize data at: {in_path} and save to {out_path} """ logger.info(msg) # anonymize if args.type == "batch": anonymize_root_folder( in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules ) elif args.type == "folder": anonymize_dicom_folder( in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules ) logger.info("Well done!") if __name__ == "__main__": main()
def get_extra_rules(
    use_extra: bool,
    extra_json_path: Path_Str,
) -> Optional[ActionsDict]:
    """Helper to provide custom (project-level/user-level) anonymization rules
    as a mapping of tags -> action function.

    Args:
        use_extra (bool): Whether to use the extra rules.
        extra_json_path (Path_Str): Path to the extra-rules JSON file. It
            should be a flat JSON object with an action as key and a list of
            tags as value.

    Returns:
        Optional[ActionsDict]: extra rules mapping (tags -> action function)
    """
    # Define the actions dict for additional tags (customization)
    extra_rules = None
    if use_extra:
        # default or user-provided path to the extra-rules JSON file
        with open(extra_json_path, "r") as fin:
            extra_rules = json.load(fin)
        # JSON stores tags as lists; the anonymizer expects tuples
        for key in extra_rules:
            tag_list = extra_rules[key]
            tag_list = [tuple(elem) for elem in tag_list]
            extra_rules[key] = tag_list
        extra_rules = initialize_actions(extra_rules)
    return extra_rules
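A sketch of the expected rules file and call, following the record's own description (flat JSON of action -> list of tags). The action names and tag pairs below are hypothetical; valid action names are whatever initialize_actions() accepts. The path matches the project default used in main().

# extra_rules.json -- flat JSON: action name -> list of [group, element]
# tag pairs. The names and tags shown here are illustrative only:
# {
#     "keep":   [[16, 16]],
#     "delete": [[16, 4112]]
# }

rules = get_extra_rules(
    use_extra=True,
    extra_json_path="dicomanonymizer/resources/extra_rules.json",
)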
47
73
""" This module is intended to extend functionality of the code provided by original authors. The process is as follows: 1. User has to provide source root path containing (possibly nested) folders with dicom files 2. The program will recreate the structure in the destination root path and anonymize all dicom files. """ import argparse import json import logging import logging.config import random from pathlib import Path from typing import Optional import pydicom from dicomanonymizer.anonym_state import AnonState from dicomanonymizer.dicom_utils import fix_exposure from dicomanonymizer.simpledicomanonymizer import ( anonymize_dicom_file, initialize_actions, ) from dicomanonymizer.utils import ( LOGS_PATH, PROJ_ROOT, ActionsDict, Path_Str, get_dirs, to_Path, try_valid_dir, ) # setup logging (create dirs, if it is first time) LOGS_PATH.mkdir(parents=True, exist_ok=True) logging.config.fileConfig( PROJ_ROOT / "dicomanonymizer/config/logging.ini", defaults={"logfilename": (LOGS_PATH / "file.log").as_posix()}, disable_existing_loggers=False, ) logger = logging.getLogger(__name__) _STATE_PATH = Path.home() / ".dicomanonymizer/cache" _STATE_PATH.mkdir(parents=True, exist_ok=True) def get_extra_rules( use_extra: bool, extra_json_path: Path_Str, ) -> Optional[ActionsDict]: """Helper to provide custom (project level/user level) anonymization rules as a mapping of tags -> action function. Args: use_extra (bool): If use extra rules. extra_json_path (Path_Str): Path to extra rules json file. It should be flat json with action as a key and list of tags as value. Returns: Optional[ActionsDict]: extra rules mapping (tags -> action function) """ # Define the actions dict for additional tags (customization) extra_rules = None if use_extra: # default or user provided path to extra rules json file with open(extra_json_path, "r") as fout: extra_rules = json.load(fout) for key in extra_rules: tag_list = extra_rules[key] tag_list = [tuple(elem) for elem in tag_list] extra_rules[key] = tag_list extra_rules = initialize_actions(extra_rules) return extra_rules def anonymize_dicom_folder( in_path: Path_Str, out_path: Path_Str, debug: bool = False, **kwargs ): """Anonymize dicom files in `in_path`, if `in_path` doesn't contain dicom files, will do nothing. 
Debug == True will do sort of dry run to check if all good for the large data storages Args: in_path (Path_Str): path to the folder containing dicom files out_path (Path_Str): path to the folder there anonymized copies will be saved debuf (bool): if true, will do a "dry" run """ # check and prepare in_path = to_Path(in_path) try_valid_dir(in_path) out_path = to_Path(out_path) out_path.mkdir(parents=True, exist_ok=True) logger.info(f"Processing: {in_path}") # work itself in_files = [p for p in in_path.iterdir() if p.is_file()] if not in_files: logger.info(f"Folder {in_path} doesn't have dicom files, skip.") return if debug: # anonymize just one file f_in = random.choice(in_files) f_out = out_path / f_in.name try: anonymize_dicom_file(f_in, f_out) except Exception as e: logger.info(f_in) logger.exception(e) raise e else: for f_in in in_files: f_out = out_path / f_in.name try: anonymize_dicom_file(f_in, f_out, **kwargs) except Exception as e: logger.info(f_in) logger.exception(e) raise e def anonymize_root_folder( in_root: Path_Str, out_root: Path_Str, **kwargs, ): """The fuction will get all nested folders from `in_root` and perform anonymization of all folders containg dicom-files Will recreate the `in_root` folders structure in the `out_root` Args: in_root (Path_Str): source root folder (presumably has some dicom-files inide, maybe nested) out_root (Path_Str): destination root folder, will create if not exists """ in_root = to_Path(in_root) try_valid_dir(in_root) out_root = to_Path(out_root) out_root.mkdir(parents=True, exist_ok=True) in_dirs = get_dirs(in_root) state = AnonState(_STATE_PATH) state.init_state() state.load_state() def get_tags_callback(dataset: pydicom.Dataset): state.tag_counter.update(dataset.dir()) logger.info( "Processed paths will be added to the cache, if cache exist and has some paths included, they will be skipped" ) logger.info( f"if, you need to process data again delete files {_STATE_PATH}, please" ) # will try to process all folders, if exception will dump state before raising try: for in_d in in_dirs: rel_path = in_d.relative_to(in_root) if str(rel_path) in state.visited_folders: logger.info(f"{in_d} path is in cache, skipping") continue else: out_d = out_root / rel_path anonymize_dicom_folder( in_d, out_d, ds_callback=get_tags_callback, **kwargs ) # update state state.visited_folders[str(rel_path)] = True except Exception as e: raise e finally: # before saving updated state let's flag tags not seen previously prev_state = AnonState(_STATE_PATH) prev_state.init_state() prev_state.load_state() new_tags = set(state.tag_counter.keys()).difference( prev_state.tag_counter.keys() ) if new_tags: logger.warning( f"During the anonymization new tags: {new_tags} were present" ) else: logger.info("No new tags werer present") # now we can save the current state state.save_state() # Add CLI args parser = argparse.ArgumentParser(description="Batch dicom-anonymization CLI") parser.add_argument( "--type", type=str, choices=["batch", "folder"], default="batch", help="Process only one folder - folder or all nested folders - batch, default = batch", ) parser.add_argument( "--extra-rules", default="", help="Path to json file defining extra rules for additional tags. 
Defalult in project.", ) parser.add_argument( "--no-extra", action="store_true", help="Only use a rules from DICOM-standard basic de-id profile", ) parser.add_argument( "--debug", action="store_true", help="Will do a dry run (one file per folder)" ) parser.add_argument( "src", type=str, help="Absolute path to the folder containing dicom-files or nested folders with dicom-files", ) parser.add_argument( "dst", type=str, help="Absolute path to the folder where to save anonymized copy of src", ) def main(): # parse args args = parser.parse_args() in_path = Path(args.src) out_path = Path(args.dst) debug = args.debug path = args.extra_rules if not path: path = PROJ_ROOT / "dicomanonymizer/resources/extra_rules.json" extra_rules = get_extra_rules(use_extra=not args.no_extra, extra_json_path=path) # fix known issue with dicom fix_exposure() msg = f""" Start a job: {args.type}, debug set to {args.debug} Will anonymize data at: {in_path} and save to {out_path} """ logger.info(msg) # anonymize if args.type == "batch": anonymize_root_folder( in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules ) elif args.type == "folder": anonymize_dicom_folder( in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules ) logger.info("Well done!") if __name__ == "__main__": main()
anonymize_dicom_folder
Anonymize DICOM files in `in_path`; if `in_path` doesn't contain DICOM files, do nothing. debug=True performs a sort of dry run (one random file per folder) to check that all is well before processing large data storages.

Args:
    in_path (Path_Str): path to the folder containing DICOM files
    out_path (Path_Str): path to the folder where anonymized copies will be saved
    debug (bool): if true, will do a "dry" run
""" This module is intended to extend functionality of the code provided by original authors. The process is as follows: 1. User has to provide source root path containing (possibly nested) folders with dicom files 2. The program will recreate the structure in the destination root path and anonymize all dicom files. """ import argparse import json import logging import logging.config import random from pathlib import Path from typing import Optional import pydicom from dicomanonymizer.anonym_state import AnonState from dicomanonymizer.dicom_utils import fix_exposure from dicomanonymizer.simpledicomanonymizer import ( anonymize_dicom_file, initialize_actions, ) from dicomanonymizer.utils import ( LOGS_PATH, PROJ_ROOT, ActionsDict, Path_Str, get_dirs, to_Path, try_valid_dir, ) # setup logging (create dirs, if it is first time) LOGS_PATH.mkdir(parents=True, exist_ok=True) logging.config.fileConfig( PROJ_ROOT / "dicomanonymizer/config/logging.ini", defaults={"logfilename": (LOGS_PATH / "file.log").as_posix()}, disable_existing_loggers=False, ) logger = logging.getLogger(__name__) _STATE_PATH = Path.home() / ".dicomanonymizer/cache" _STATE_PATH.mkdir(parents=True, exist_ok=True) def get_extra_rules( use_extra: bool, extra_json_path: Path_Str, ) -> Optional[ActionsDict]: """Helper to provide custom (project level/user level) anonymization rules as a mapping of tags -> action function. Args: use_extra (bool): If use extra rules. extra_json_path (Path_Str): Path to extra rules json file. It should be flat json with action as a key and list of tags as value. Returns: Optional[ActionsDict]: extra rules mapping (tags -> action function) """ # Define the actions dict for additional tags (customization) extra_rules = None if use_extra: # default or user provided path to extra rules json file with open(extra_json_path, "r") as fout: extra_rules = json.load(fout) for key in extra_rules: tag_list = extra_rules[key] tag_list = [tuple(elem) for elem in tag_list] extra_rules[key] = tag_list extra_rules = initialize_actions(extra_rules) return extra_rules # MASKED: anonymize_dicom_folder function (lines 76-122) def anonymize_root_folder( in_root: Path_Str, out_root: Path_Str, **kwargs, ): """The fuction will get all nested folders from `in_root` and perform anonymization of all folders containg dicom-files Will recreate the `in_root` folders structure in the `out_root` Args: in_root (Path_Str): source root folder (presumably has some dicom-files inide, maybe nested) out_root (Path_Str): destination root folder, will create if not exists """ in_root = to_Path(in_root) try_valid_dir(in_root) out_root = to_Path(out_root) out_root.mkdir(parents=True, exist_ok=True) in_dirs = get_dirs(in_root) state = AnonState(_STATE_PATH) state.init_state() state.load_state() def get_tags_callback(dataset: pydicom.Dataset): state.tag_counter.update(dataset.dir()) logger.info( "Processed paths will be added to the cache, if cache exist and has some paths included, they will be skipped" ) logger.info( f"if, you need to process data again delete files {_STATE_PATH}, please" ) # will try to process all folders, if exception will dump state before raising try: for in_d in in_dirs: rel_path = in_d.relative_to(in_root) if str(rel_path) in state.visited_folders: logger.info(f"{in_d} path is in cache, skipping") continue else: out_d = out_root / rel_path anonymize_dicom_folder( in_d, out_d, ds_callback=get_tags_callback, **kwargs ) # update state state.visited_folders[str(rel_path)] = True except Exception as e: raise e finally: # before 
saving updated state let's flag tags not seen previously prev_state = AnonState(_STATE_PATH) prev_state.init_state() prev_state.load_state() new_tags = set(state.tag_counter.keys()).difference( prev_state.tag_counter.keys() ) if new_tags: logger.warning( f"During the anonymization new tags: {new_tags} were present" ) else: logger.info("No new tags werer present") # now we can save the current state state.save_state() # Add CLI args parser = argparse.ArgumentParser(description="Batch dicom-anonymization CLI") parser.add_argument( "--type", type=str, choices=["batch", "folder"], default="batch", help="Process only one folder - folder or all nested folders - batch, default = batch", ) parser.add_argument( "--extra-rules", default="", help="Path to json file defining extra rules for additional tags. Defalult in project.", ) parser.add_argument( "--no-extra", action="store_true", help="Only use a rules from DICOM-standard basic de-id profile", ) parser.add_argument( "--debug", action="store_true", help="Will do a dry run (one file per folder)" ) parser.add_argument( "src", type=str, help="Absolute path to the folder containing dicom-files or nested folders with dicom-files", ) parser.add_argument( "dst", type=str, help="Absolute path to the folder where to save anonymized copy of src", ) def main(): # parse args args = parser.parse_args() in_path = Path(args.src) out_path = Path(args.dst) debug = args.debug path = args.extra_rules if not path: path = PROJ_ROOT / "dicomanonymizer/resources/extra_rules.json" extra_rules = get_extra_rules(use_extra=not args.no_extra, extra_json_path=path) # fix known issue with dicom fix_exposure() msg = f""" Start a job: {args.type}, debug set to {args.debug} Will anonymize data at: {in_path} and save to {out_path} """ logger.info(msg) # anonymize if args.type == "batch": anonymize_root_folder( in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules ) elif args.type == "folder": anonymize_dicom_folder( in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules ) logger.info("Well done!") if __name__ == "__main__": main()
def anonymize_dicom_folder(
    in_path: Path_Str, out_path: Path_Str, debug: bool = False, **kwargs
):
    """Anonymize DICOM files in `in_path`; if `in_path` doesn't contain
    DICOM files, do nothing. debug=True performs a sort of dry run (one
    random file per folder) to check that all is well before processing
    large data storages.

    Args:
        in_path (Path_Str): path to the folder containing DICOM files
        out_path (Path_Str): path to the folder where anonymized copies
            will be saved
        debug (bool): if true, will do a "dry" run
    """
    # check and prepare
    in_path = to_Path(in_path)
    try_valid_dir(in_path)
    out_path = to_Path(out_path)
    out_path.mkdir(parents=True, exist_ok=True)
    logger.info(f"Processing: {in_path}")
    # work itself
    in_files = [p for p in in_path.iterdir() if p.is_file()]
    if not in_files:
        logger.info(f"Folder {in_path} doesn't have dicom files, skip.")
        return
    if debug:
        # anonymize just one randomly chosen file; forward the same kwargs
        # as the full run so the dry run is representative
        f_in = random.choice(in_files)
        f_out = out_path / f_in.name
        try:
            anonymize_dicom_file(f_in, f_out, **kwargs)
        except Exception as e:
            logger.info(f_in)
            logger.exception(e)
            raise e
    else:
        for f_in in in_files:
            f_out = out_path / f_in.name
            try:
                anonymize_dicom_file(f_in, f_out, **kwargs)
            except Exception as e:
                logger.info(f_in)
                logger.exception(e)
                raise e
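The typical call pattern, sketched with made-up paths: dry-run one random file first, then run the full folder.

# Paths are illustrative.
anonymize_dicom_folder("/data/dicom/study_001", "/data/anon/study_001", debug=True)
anonymize_dicom_folder("/data/dicom/study_001", "/data/anon/study_001")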
76
122
""" This module is intended to extend functionality of the code provided by original authors. The process is as follows: 1. User has to provide source root path containing (possibly nested) folders with dicom files 2. The program will recreate the structure in the destination root path and anonymize all dicom files. """ import argparse import json import logging import logging.config import random from pathlib import Path from typing import Optional import pydicom from dicomanonymizer.anonym_state import AnonState from dicomanonymizer.dicom_utils import fix_exposure from dicomanonymizer.simpledicomanonymizer import ( anonymize_dicom_file, initialize_actions, ) from dicomanonymizer.utils import ( LOGS_PATH, PROJ_ROOT, ActionsDict, Path_Str, get_dirs, to_Path, try_valid_dir, ) # setup logging (create dirs, if it is first time) LOGS_PATH.mkdir(parents=True, exist_ok=True) logging.config.fileConfig( PROJ_ROOT / "dicomanonymizer/config/logging.ini", defaults={"logfilename": (LOGS_PATH / "file.log").as_posix()}, disable_existing_loggers=False, ) logger = logging.getLogger(__name__) _STATE_PATH = Path.home() / ".dicomanonymizer/cache" _STATE_PATH.mkdir(parents=True, exist_ok=True) def get_extra_rules( use_extra: bool, extra_json_path: Path_Str, ) -> Optional[ActionsDict]: """Helper to provide custom (project level/user level) anonymization rules as a mapping of tags -> action function. Args: use_extra (bool): If use extra rules. extra_json_path (Path_Str): Path to extra rules json file. It should be flat json with action as a key and list of tags as value. Returns: Optional[ActionsDict]: extra rules mapping (tags -> action function) """ # Define the actions dict for additional tags (customization) extra_rules = None if use_extra: # default or user provided path to extra rules json file with open(extra_json_path, "r") as fout: extra_rules = json.load(fout) for key in extra_rules: tag_list = extra_rules[key] tag_list = [tuple(elem) for elem in tag_list] extra_rules[key] = tag_list extra_rules = initialize_actions(extra_rules) return extra_rules def anonymize_dicom_folder( in_path: Path_Str, out_path: Path_Str, debug: bool = False, **kwargs ): """Anonymize dicom files in `in_path`, if `in_path` doesn't contain dicom files, will do nothing. 
Debug == True will do sort of dry run to check if all good for the large data storages Args: in_path (Path_Str): path to the folder containing dicom files out_path (Path_Str): path to the folder there anonymized copies will be saved debuf (bool): if true, will do a "dry" run """ # check and prepare in_path = to_Path(in_path) try_valid_dir(in_path) out_path = to_Path(out_path) out_path.mkdir(parents=True, exist_ok=True) logger.info(f"Processing: {in_path}") # work itself in_files = [p for p in in_path.iterdir() if p.is_file()] if not in_files: logger.info(f"Folder {in_path} doesn't have dicom files, skip.") return if debug: # anonymize just one file f_in = random.choice(in_files) f_out = out_path / f_in.name try: anonymize_dicom_file(f_in, f_out) except Exception as e: logger.info(f_in) logger.exception(e) raise e else: for f_in in in_files: f_out = out_path / f_in.name try: anonymize_dicom_file(f_in, f_out, **kwargs) except Exception as e: logger.info(f_in) logger.exception(e) raise e def anonymize_root_folder( in_root: Path_Str, out_root: Path_Str, **kwargs, ): """The fuction will get all nested folders from `in_root` and perform anonymization of all folders containg dicom-files Will recreate the `in_root` folders structure in the `out_root` Args: in_root (Path_Str): source root folder (presumably has some dicom-files inide, maybe nested) out_root (Path_Str): destination root folder, will create if not exists """ in_root = to_Path(in_root) try_valid_dir(in_root) out_root = to_Path(out_root) out_root.mkdir(parents=True, exist_ok=True) in_dirs = get_dirs(in_root) state = AnonState(_STATE_PATH) state.init_state() state.load_state() def get_tags_callback(dataset: pydicom.Dataset): state.tag_counter.update(dataset.dir()) logger.info( "Processed paths will be added to the cache, if cache exist and has some paths included, they will be skipped" ) logger.info( f"if, you need to process data again delete files {_STATE_PATH}, please" ) # will try to process all folders, if exception will dump state before raising try: for in_d in in_dirs: rel_path = in_d.relative_to(in_root) if str(rel_path) in state.visited_folders: logger.info(f"{in_d} path is in cache, skipping") continue else: out_d = out_root / rel_path anonymize_dicom_folder( in_d, out_d, ds_callback=get_tags_callback, **kwargs ) # update state state.visited_folders[str(rel_path)] = True except Exception as e: raise e finally: # before saving updated state let's flag tags not seen previously prev_state = AnonState(_STATE_PATH) prev_state.init_state() prev_state.load_state() new_tags = set(state.tag_counter.keys()).difference( prev_state.tag_counter.keys() ) if new_tags: logger.warning( f"During the anonymization new tags: {new_tags} were present" ) else: logger.info("No new tags werer present") # now we can save the current state state.save_state() # Add CLI args parser = argparse.ArgumentParser(description="Batch dicom-anonymization CLI") parser.add_argument( "--type", type=str, choices=["batch", "folder"], default="batch", help="Process only one folder - folder or all nested folders - batch, default = batch", ) parser.add_argument( "--extra-rules", default="", help="Path to json file defining extra rules for additional tags. 
Defalult in project.", ) parser.add_argument( "--no-extra", action="store_true", help="Only use a rules from DICOM-standard basic de-id profile", ) parser.add_argument( "--debug", action="store_true", help="Will do a dry run (one file per folder)" ) parser.add_argument( "src", type=str, help="Absolute path to the folder containing dicom-files or nested folders with dicom-files", ) parser.add_argument( "dst", type=str, help="Absolute path to the folder where to save anonymized copy of src", ) def main(): # parse args args = parser.parse_args() in_path = Path(args.src) out_path = Path(args.dst) debug = args.debug path = args.extra_rules if not path: path = PROJ_ROOT / "dicomanonymizer/resources/extra_rules.json" extra_rules = get_extra_rules(use_extra=not args.no_extra, extra_json_path=path) # fix known issue with dicom fix_exposure() msg = f""" Start a job: {args.type}, debug set to {args.debug} Will anonymize data at: {in_path} and save to {out_path} """ logger.info(msg) # anonymize if args.type == "batch": anonymize_root_folder( in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules ) elif args.type == "folder": anonymize_dicom_folder( in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules ) logger.info("Well done!") if __name__ == "__main__": main()
_cmdf_in
`{cmd}` - Adds you to the game. This command also allows moderators to add other users and arbitrary strings as participants. **Example:** `{cmd} an elephant` - Adds "an elephant" as a participant.
import asyncio import random import re import textwrap import discord from .. import utils, errors, cmd from ..servermodule import ServerModule, registered from ..enums import PrivilegeLevel @registered class TruthGame(ServerModule): MODULE_NAME = "Truth Game" MODULE_SHORT_DESCRIPTION = "Tools to play *Truth*." RECOMMENDED_CMD_NAMES = ["truth", "troof", "trufe"] _SECRET_TOKEN = utils.SecretToken() _cmdd = {} _HELP_SUMMARY = """ `{modhelp}` - Truth game. """ DEFAULT_SETTINGS = { "enabled channels": [] } _PARTICIPANT_DELIMITER = " --> " _RULES_STRING = textwrap.dedent(""" **Rules for a game of _Truth_**: idk, ask the people playing it. """).strip() async def _initialize(self, resources): self._client = resources.client self._res = resources self._enabled_channels = None self._load_settings() self._res.suppress_autokill(True) return def _load_settings(self): settings = self._res.get_settings(default=self.DEFAULT_SETTINGS) self._enabled_channels = [] try: self._enabled_channels = settings["enabled channels"] if self._enabled_channels is None: print("DEBUGGING: truthgame.py TruthGame._load_settings() enabled channels is None!") self._enabled_channels = [] except KeyError: self._enabled_channels = settings["enabled channels"] = [] self._res.save_settings(settings) return def _save_settings(self): settings = self._res.get_settings() settings["enabled channels"] = self._enabled_channels self._res.save_settings(settings) return @cmd.add(_cmdd, "rules") async def _cmdf_enable(self, substr, msg, privilege_level): """`{cmd}` - View game rules.""" await self._client.send_msg(msg, self._RULES_STRING) return @cmd.add(_cmdd, "newgame", top=True) @cmd.minimum_privilege(PrivilegeLevel.TRUSTED) async def _cmdf_newgame(self, substr, msg, privilege_level): """`{cmd}` - New game.""" channel = msg.channel await self._abort_if_not_truth_channel(channel) await self._new_game(channel) await self._client.send_msg(channel, "Truth game cleared.") return # MASKED: _cmdf_in function (lines 84-107) @cmd.add(_cmdd, "out", top=True) async def _cmdf_out(self, substr, msg, privilege_level): """ `{cmd}` - Removes you from the game. This command also allows moderators to remove other users and arbitrary strings. **Example:** `{cmd} an elephant` - Removes "an elephant" as a participant. 
""" channel = msg.channel await self._abort_if_not_truth_channel(channel) participant = None if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0): participant = "<@" + msg.author.id + ">" else: participant = substr if participant in self._get_participants(channel): await self._remove_participant(channel, participant) await self._client.send_msg(channel, "Removed {} from the game.".format(participant)) else: await self._client.send_msg(channel, "Error: {} is not already a participant.".format(participant)) return @cmd.add(_cmdd, "enablechannel") @cmd.minimum_privilege(PrivilegeLevel.ADMIN) async def _cmdf_enable(self, substr, msg, privilege_level): """`{cmd}` - Enable Truth in this channel.""" channel = msg.channel if channel.id in self._enabled_channels: await self._client.send_msg(channel, "This channel is already a Truth game channel.") else: self._enabled_channels.append(channel.id) self._save_settings() await self._client.send_msg(channel, "This channel is now a Truth game channel.") return @cmd.add(_cmdd, "disablechannel") @cmd.minimum_privilege(PrivilegeLevel.ADMIN) async def _cmdf_disable(self, substr, msg, privilege_level): """`{cmd}` - Disable Truth in this channel.""" channel = msg.channel if channel.id in self._enabled_channels: self._enabled_channels.remove(channel.id) self._save_settings() await self._client.send_msg(channel, "This channel is no longer a Truth game channel.") else: await self._client.send_msg(channel, "This channel is not a Truth game channel.") return @cmd.add(_cmdd, "viewenabled") async def _cmdf_viewenabled(self, substr, msg, privilege_level): """`{cmd}` - View all channels that are enabled as Truth channels.""" buf = None if len(self._enabled_channels) == 0: buf = "No channels have Truth game enabled." else: buf = "**Truth game enabled channels:**" for channel_id in self._enabled_channels: buf += "\n<#{0}> (ID: {0})".format(channel_id) await self._client.send_msg(msg, buf) return # TODO: Edit this to use the topic string abstraction methods. # Currently, it only consideres user mentions to be participants! @cmd.add(_cmdd, "choose", "random", "rand") async def _cmdf_choosetruth(self, substr, msg, privilege_level): """`{cmd}` - Pick a random participant other than yourself.""" topic = msg.channel.topic if topic is None: await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.") raise errors.OperationAborted mentions = utils.get_all_mentions(topic) if len(mentions) == 0: await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.") raise errors.OperationAborted try: mentions.remove(msg.author.id) if len(mentions) == 0: await self._client.send_msg(msg, "<@{}>".format(msg.author.id)) raise errors.OperationAborted except ValueError: pass choice = random.choice(mentions) buf = "<@{}>\n".format(choice) buf += "My choices were: " for mention in mentions: user = self._client.search_for_user(mention, enablenamesearch=False, serverrestriction=self._res.server) if user is None: buf += "<@{}>, ".format(mention) else: buf += "{}, ".format(user.name) buf = buf[:-2] await self._client.send_msg(msg, buf) return ################################ ### TOPIC STRING ABSTRACTION ### ################################ def _get_participants(self, channel): topic = channel.topic if topic is None: return [] return topic.split(self._PARTICIPANT_DELIMITER) # PRECONDITION: participant_str contains printable characters. # PRECONDITION: participant_str does not contain the delimiter. 
async def _add_participant(self, channel, participant_str): topic = channel.topic new_topic = None if topic == "": new_topic = participant_str else: new_topic = topic + self._PARTICIPANT_DELIMITER + participant_str await self._client.edit_channel(channel, topic=new_topic) return # PRECONDITION: participant_str in self._get_participants(channel) async def _remove_participant(self, channel, participant_str): participants_list = self._get_participants(channel) participants_list.remove(participant_str) new_topic = self._PARTICIPANT_DELIMITER.join(participants_list) await self._client.edit_channel(channel, topic=new_topic) return async def _new_game(self, channel): await self._client.edit_channel(channel, topic="") return ######################## ### GENERAL SERVICES ### ######################## async def _abort_if_not_truth_channel(self, channel): if not channel.id in self._enabled_channels: await self._client.send_msg(channel, "Error: Truth isn't enabled on this channel.") raise errors.OperationAborted return
@cmd.add(_cmdd, "in", top=True)
async def _cmdf_in(self, substr, msg, privilege_level):
    """
    `{cmd}` - Adds you to the game.

    This command also allows moderators to add other users and arbitrary
    strings as participants.

    **Example:** `{cmd} an elephant` - Adds "an elephant" as a participant.
    """
    channel = msg.channel
    await self._abort_if_not_truth_channel(channel)
    new_participant = None
    if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0):
        new_participant = "<@" + msg.author.id + ">"
    else:
        new_participant = substr
    if self._PARTICIPANT_DELIMITER in new_participant:
        await self._client.send_msg(channel, "Error: Not allowed to use the delimiter characters.")
        raise errors.OperationAborted
    if new_participant in self._get_participants(channel):
        await self._client.send_msg(channel, "Error: {} is already a participant.".format(new_participant))
    else:
        await self._add_participant(channel, new_participant)
        await self._client.send_msg(channel, "Added {} to the game.".format(new_participant))
    return
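For context, the participant list lives entirely in the channel topic, joined by the module's " --> " delimiter (which is why the command rejects strings containing it). A standalone sketch of that encoding:

PARTICIPANT_DELIMITER = " --> "

participants = ["<@111>", "<@222>", "an elephant"]
topic = PARTICIPANT_DELIMITER.join(participants)
# '<@111> --> <@222> --> an elephant'

assert topic.split(PARTICIPANT_DELIMITER) == participants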
84
107
import asyncio import random import re import textwrap import discord from .. import utils, errors, cmd from ..servermodule import ServerModule, registered from ..enums import PrivilegeLevel @registered class TruthGame(ServerModule): MODULE_NAME = "Truth Game" MODULE_SHORT_DESCRIPTION = "Tools to play *Truth*." RECOMMENDED_CMD_NAMES = ["truth", "troof", "trufe"] _SECRET_TOKEN = utils.SecretToken() _cmdd = {} _HELP_SUMMARY = """ `{modhelp}` - Truth game. """ DEFAULT_SETTINGS = { "enabled channels": [] } _PARTICIPANT_DELIMITER = " --> " _RULES_STRING = textwrap.dedent(""" **Rules for a game of _Truth_**: idk, ask the people playing it. """).strip() async def _initialize(self, resources): self._client = resources.client self._res = resources self._enabled_channels = None self._load_settings() self._res.suppress_autokill(True) return def _load_settings(self): settings = self._res.get_settings(default=self.DEFAULT_SETTINGS) self._enabled_channels = [] try: self._enabled_channels = settings["enabled channels"] if self._enabled_channels is None: print("DEBUGGING: truthgame.py TruthGame._load_settings() enabled channels is None!") self._enabled_channels = [] except KeyError: self._enabled_channels = settings["enabled channels"] = [] self._res.save_settings(settings) return def _save_settings(self): settings = self._res.get_settings() settings["enabled channels"] = self._enabled_channels self._res.save_settings(settings) return @cmd.add(_cmdd, "rules") async def _cmdf_enable(self, substr, msg, privilege_level): """`{cmd}` - View game rules.""" await self._client.send_msg(msg, self._RULES_STRING) return @cmd.add(_cmdd, "newgame", top=True) @cmd.minimum_privilege(PrivilegeLevel.TRUSTED) async def _cmdf_newgame(self, substr, msg, privilege_level): """`{cmd}` - New game.""" channel = msg.channel await self._abort_if_not_truth_channel(channel) await self._new_game(channel) await self._client.send_msg(channel, "Truth game cleared.") return @cmd.add(_cmdd, "in", top=True) async def _cmdf_in(self, substr, msg, privilege_level): """ `{cmd}` - Adds you to the game. This command also allows moderators to add other users and arbitrary strings as participants. **Example:** `{cmd} an elephant` - Adds "an elephant" as a participant. """ channel = msg.channel await self._abort_if_not_truth_channel(channel) new_participant = None if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0): new_participant = "<@" + msg.author.id + ">" else: new_participant = substr if self._PARTICIPANT_DELIMITER in new_participant: await self._client.send_msg(channel, "Error: Not allowed to use the delimiter characters.") raise errors.OperationAborted if new_participant in self._get_participants(channel): await self._client.send_msg(channel, "Error: {} is already a participant.".format(new_participant)) else: await self._add_participant(channel, new_participant) await self._client.send_msg(channel, "Added {} to the game.".format(new_participant)) return @cmd.add(_cmdd, "out", top=True) async def _cmdf_out(self, substr, msg, privilege_level): """ `{cmd}` - Removes you from the game. This command also allows moderators to remove other users and arbitrary strings. **Example:** `{cmd} an elephant` - Removes "an elephant" as a participant. 
""" channel = msg.channel await self._abort_if_not_truth_channel(channel) participant = None if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0): participant = "<@" + msg.author.id + ">" else: participant = substr if participant in self._get_participants(channel): await self._remove_participant(channel, participant) await self._client.send_msg(channel, "Removed {} from the game.".format(participant)) else: await self._client.send_msg(channel, "Error: {} is not already a participant.".format(participant)) return @cmd.add(_cmdd, "enablechannel") @cmd.minimum_privilege(PrivilegeLevel.ADMIN) async def _cmdf_enable(self, substr, msg, privilege_level): """`{cmd}` - Enable Truth in this channel.""" channel = msg.channel if channel.id in self._enabled_channels: await self._client.send_msg(channel, "This channel is already a Truth game channel.") else: self._enabled_channels.append(channel.id) self._save_settings() await self._client.send_msg(channel, "This channel is now a Truth game channel.") return @cmd.add(_cmdd, "disablechannel") @cmd.minimum_privilege(PrivilegeLevel.ADMIN) async def _cmdf_disable(self, substr, msg, privilege_level): """`{cmd}` - Disable Truth in this channel.""" channel = msg.channel if channel.id in self._enabled_channels: self._enabled_channels.remove(channel.id) self._save_settings() await self._client.send_msg(channel, "This channel is no longer a Truth game channel.") else: await self._client.send_msg(channel, "This channel is not a Truth game channel.") return @cmd.add(_cmdd, "viewenabled") async def _cmdf_viewenabled(self, substr, msg, privilege_level): """`{cmd}` - View all channels that are enabled as Truth channels.""" buf = None if len(self._enabled_channels) == 0: buf = "No channels have Truth game enabled." else: buf = "**Truth game enabled channels:**" for channel_id in self._enabled_channels: buf += "\n<#{0}> (ID: {0})".format(channel_id) await self._client.send_msg(msg, buf) return # TODO: Edit this to use the topic string abstraction methods. # Currently, it only consideres user mentions to be participants! @cmd.add(_cmdd, "choose", "random", "rand") async def _cmdf_choosetruth(self, substr, msg, privilege_level): """`{cmd}` - Pick a random participant other than yourself.""" topic = msg.channel.topic if topic is None: await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.") raise errors.OperationAborted mentions = utils.get_all_mentions(topic) if len(mentions) == 0: await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.") raise errors.OperationAborted try: mentions.remove(msg.author.id) if len(mentions) == 0: await self._client.send_msg(msg, "<@{}>".format(msg.author.id)) raise errors.OperationAborted except ValueError: pass choice = random.choice(mentions) buf = "<@{}>\n".format(choice) buf += "My choices were: " for mention in mentions: user = self._client.search_for_user(mention, enablenamesearch=False, serverrestriction=self._res.server) if user is None: buf += "<@{}>, ".format(mention) else: buf += "{}, ".format(user.name) buf = buf[:-2] await self._client.send_msg(msg, buf) return ################################ ### TOPIC STRING ABSTRACTION ### ################################ def _get_participants(self, channel): topic = channel.topic if topic is None: return [] return topic.split(self._PARTICIPANT_DELIMITER) # PRECONDITION: participant_str contains printable characters. # PRECONDITION: participant_str does not contain the delimiter. 
async def _add_participant(self, channel, participant_str): topic = channel.topic new_topic = None if topic == "": new_topic = participant_str else: new_topic = topic + self._PARTICIPANT_DELIMITER + participant_str await self._client.edit_channel(channel, topic=new_topic) return # PRECONDITION: participant_str in self._get_participants(channel) async def _remove_participant(self, channel, participant_str): participants_list = self._get_participants(channel) participants_list.remove(participant_str) new_topic = self._PARTICIPANT_DELIMITER.join(participants_list) await self._client.edit_channel(channel, topic=new_topic) return async def _new_game(self, channel): await self._client.edit_channel(channel, topic="") return ######################## ### GENERAL SERVICES ### ######################## async def _abort_if_not_truth_channel(self, channel): if not channel.id in self._enabled_channels: await self._client.send_msg(channel, "Error: Truth isn't enabled on this channel.") raise errors.OperationAborted return
resize_axis
Truncates or pads a tensor to new_size on a given axis.

Truncate or extend tensor such that tensor.shape[axis] == new_size. If the size increases, the padding will be performed at the end, using fill_value.

Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be cast to the type of tensor.
    random_sampling: If True and the axis is being truncated, take new_size entries sampled at random (with replacement) along the axis instead of a contiguous slice.

Returns:
    The resized tensor.
import io import os import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from PIL import Image # MASKED: resize_axis function (lines 12-43) class CircleLoss(torch.nn.Module): def __init__(self, m=0.25, gamma=256): super(CircleLoss, self).__init__() self.m = m self.gamma = gamma self.loss = torch.nn.CrossEntropyLoss() def forward(self, logits, labels): alpha = torch.clamp_min(logits + self.m, min=0).detach() # an alpha[labels] = torch.clamp_min(-logits[labels] + 1 + self.m, min=0).detach() # ap delta = torch.ones_like(logits, device=logits.device, dtype=logits.dtype) * self.m # delta_n delta[labels] = 1 - self.m # delta_p return self.loss(alpha * (logits - delta) * self.gamma, labels)
def resize_axis(tensor, axis, new_size, fill_value=0, random_sampling=False):
    """Truncates or pads a tensor to new_size on a given axis.

    Truncate or extend tensor such that tensor.shape[axis] == new_size. If
    the size increases, the padding will be performed at the end, using
    fill_value.

    Args:
        tensor: The tensor to be resized.
        axis: An integer representing the dimension to be sliced.
        new_size: An integer or 0d tensor representing the new value for
            tensor.shape[axis].
        fill_value: Value to use to fill any new entries in the tensor.
            Will be cast to the type of tensor.
        random_sampling: If True and the axis is being truncated, take
            new_size entries sampled at random (with replacement) instead
            of a contiguous slice.

    Returns:
        The resized tensor.
    """
    tensor = torch.Tensor(tensor)
    shape = list(tensor.shape)

    # How much padding (if any) is needed on the target axis.
    pad_shape = shape[:]
    pad_shape[axis] = max(0, new_size - shape[axis])

    # Random clip: pick a random start offset when truncating.
    start = 0 if shape[axis] <= new_size else np.random.randint(
        shape[axis] - new_size)
    old_length = shape[axis]
    shape[axis] = min(shape[axis], new_size)

    resized = torch.cat([
        torch.index_select(tensor, dim=axis,
                           index=torch.randint(old_length, (new_size,)))
        if start > 0 and random_sampling
        else torch.narrow(tensor, dim=axis, start=start, length=shape[axis]),
        torch.Tensor(*pad_shape).fill_(fill_value)
    ], dim=axis)

    return resized
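Two quick calls showing both directions of the resize; the resulting shapes follow directly from the implementation above.

import torch

x = torch.arange(6.).reshape(2, 3)            # shape (2, 3)

padded = resize_axis(x, axis=1, new_size=5, fill_value=-1)
print(padded.shape)                            # torch.Size([2, 5])

clipped = resize_axis(x, axis=1, new_size=2)   # contiguous random clip
print(clipped.shape)                           # torch.Size([2, 2])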
12
43
import io import os import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from PIL import Image def resize_axis(tensor, axis, new_size, fill_value=0, random_sampling=False): """Truncates or pads a tensor to new_size on on a given axis. Truncate or extend tensor such that tensor.shape[axis] == new_size. If the size increases, the padding will be performed at the end, using fill_value. Args: tensor: The tensor to be resized. axis: An integer representing the dimension to be sliced. new_size: An integer or 0d tensor representing the new value for tensor.shape[axis]. fill_value: Value to use to fill any new entries in the tensor. Will be cast to the type of tensor. Returns: The resized tensor. """ tensor = torch.Tensor(tensor) shape = list(tensor.shape) pad_shape = shape[:] pad_shape[axis] = max(0, new_size - shape[axis]) start = 0 if shape[axis] <= new_size else np.random.randint( shape[axis] - new_size) # random clip old_length = shape[axis] shape[axis] = min(shape[axis], new_size) resized = torch.cat([ torch.index_select(tensor, dim=axis, index=torch.randint(old_length, (new_size,)) ) if start > 0 and random_sampling else torch.narrow(tensor, dim=axis, start=start, length=shape[axis]), torch.Tensor(*pad_shape).fill_(fill_value) ], dim=axis) return resized class CircleLoss(torch.nn.Module): def __init__(self, m=0.25, gamma=256): super(CircleLoss, self).__init__() self.m = m self.gamma = gamma self.loss = torch.nn.CrossEntropyLoss() def forward(self, logits, labels): alpha = torch.clamp_min(logits + self.m, min=0).detach() # an alpha[labels] = torch.clamp_min(-logits[labels] + 1 + self.m, min=0).detach() # ap delta = torch.ones_like(logits, device=logits.device, dtype=logits.dtype) * self.m # delta_n delta[labels] = 1 - self.m # delta_p return self.loss(alpha * (logits - delta) * self.gamma, labels)
_query_for_init
Return the query for certain contract classes.

TODO: to stay compatible with the legacy usage exposed to users, e.g. api._data["quote"].items(), the exchanges should be restricted to ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"].
#!usr/bin/env python3 # -*- coding:utf-8 -*- __author__ = 'yanqiong' import random import secrets from bisect import bisect_right from sgqlc.operation import Operation from pandas.core.internals import BlockManager from tqsdk.ins_schema import ins_schema, _add_all_frags RD = random.Random(secrets.randbits(128)) # 初始化随机数引擎,使用随机数作为seed,防止用户同时拉起多个策略,产生同样的 seed def _generate_uuid(prefix=''): return f"{prefix + '_' if prefix else ''}{RD.getrandbits(128):032x}" def _query_for_quote(symbol): """ 返回请求某个合约的合约信息的 query_pack 调用次函数应该全部都是sdk的代码主动请求合约信息 用户请求合约信息一定是 PYSDK_api 开头的请求,因为用户请求的合约信息在回测时带有 timestamp 参数,是不应该调用此函数的 """ symbol_list = symbol if isinstance(symbol, list) else [symbol] op = Operation(ins_schema.rootQuery) query = op.multi_symbol_info(instrument_id=symbol_list) _add_all_frags(query) return { "aid": "ins_query", "query_id": _generate_uuid(prefix='PYSDK_quote_'), "query": op.__to_graphql__() } # MASKED: _query_for_init function (lines 39-48) night_trading_table = { "DCE.a": ["21:00:00", "23:00:00"], "DCE.b": ["21:00:00", "23:00:00"], "DCE.c": ["21:00:00", "23:00:00"], "DCE.cs": ["21:00:00", "23:00:00"], "DCE.m": ["21:00:00", "23:00:00"], "DCE.y": ["21:00:00", "23:00:00"], "DCE.p": ["21:00:00", "23:00:00"], "DCE.l": ["21:00:00", "23:00:00"], "DCE.v": ["21:00:00", "23:00:00"], "DCE.pp": ["21:00:00", "23:00:00"], "DCE.j": ["21:00:00", "23:00:00"], "DCE.jm": ["21:00:00", "23:00:00"], "DCE.i": ["21:00:00", "23:00:00"], "DCE.eg": ["21:00:00", "23:00:00"], "DCE.eb": ["21:00:00", "23:00:00"], "DCE.rr": ["21:00:00", "23:00:00"], "DCE.pg": ["21:00:00", "23:00:00"], "CZCE.CF": ["21:00:00", "23:00:00"], "CZCE.CY": ["21:00:00", "23:00:00"], "CZCE.SA": ["21:00:00", "23:00:00"], "CZCE.SR": ["21:00:00", "23:00:00"], "CZCE.TA": ["21:00:00", "23:00:00"], "CZCE.OI": ["21:00:00", "23:00:00"], "CZCE.MA": ["21:00:00", "23:00:00"], "CZCE.FG": ["21:00:00", "23:00:00"], "CZCE.RM": ["21:00:00", "23:00:00"], "CZCE.ZC": ["21:00:00", "23:00:00"], "CZCE.TC": ["21:00:00", "23:00:00"], "SHFE.rb": ["21:00:00", "23:00:00"], "SHFE.hc": ["21:00:00", "23:00:00"], "SHFE.fu": ["21:00:00", "23:00:00"], "SHFE.bu": ["21:00:00", "23:00:00"], "SHFE.ru": ["21:00:00", "23:00:00"], "SHFE.sp": ["21:00:00", "23:00:00"], "INE.nr": ["21:00:00", "23:00:00"], "SHFE.cu": ["21:00:00", "25:00:00"], "SHFE.al": ["21:00:00", "25:00:00"], "SHFE.zn": ["21:00:00", "25:00:00"], "SHFE.pb": ["21:00:00", "25:00:00"], "SHFE.ni": ["21:00:00", "25:00:00"], "SHFE.sn": ["21:00:00", "25:00:00"], "SHFE.ss": ["21:00:00", "25:00:00"], "SHFE.au": ["21:00:00", "26:30:00"], "SHFE.ag": ["21:00:00", "26:30:00"], "INE.sc": ["21:00:00", "26:30:00"], } def _quotes_add_night(quotes): """为 quotes 中应该有夜盘但是市价合约文件中没有夜盘的品种,添加夜盘时间""" for symbol in quotes: product_id = quotes[symbol].get("product_id") if quotes[symbol].get("trading_time") and product_id: key = f"{quotes[symbol].get('exchange_id')}.{product_id}" if key in night_trading_table and (not quotes[symbol]["trading_time"].get("night")): quotes[symbol]["trading_time"]["night"] = [night_trading_table[key]] def _bisect_value(a, x, priority="right"): """ 返回 bisect_right() 取得下标对应的值,当插入点距离前后元素距离相等,priority 表示优先返回右边的值还是左边的值 a: 必须是已经排序好(升序排列)的 list bisect_right : Return the index where to insert item x in list a, assuming a is sorted. 
""" assert priority in ['left', 'right'] insert_index = bisect_right(a, x) if 0 < insert_index < len(a): left_dis = x - a[insert_index - 1] right_dis = a[insert_index] - x if left_dis == right_dis: mid_index = insert_index - 1 if priority == "left" else insert_index elif left_dis < right_dis: mid_index = insert_index - 1 else: mid_index = insert_index else: assert insert_index == 0 or insert_index == len(a) mid_index = 0 if insert_index == 0 else (len(a) - 1) return a[mid_index] class BlockManagerUnconsolidated(BlockManager): """mock BlockManager for unconsolidated, 不会因为自动合并同类型的 blocks 而导致 k 线数据不更新""" def __init__(self, *args, **kwargs): BlockManager.__init__(self, *args, **kwargs) self._is_consolidated = False self._known_consolidated = False def _consolidate_inplace(self): pass
def _query_for_init():
    """
    Return the query for certain contract classes.

    TODO: to stay compatible with the legacy usage exposed to users, e.g.
    api._data["quote"].items(), the exchanges should be restricted to
    ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"].
    """
    op = Operation(ins_schema.rootQuery)
    query = op.multi_symbol_info(class_=["FUTURE", "INDEX", "OPTION", "COMBINE", "CONT"],
                                 exchange_id=["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"])
    _add_all_frags(query)
    return op.__to_graphql__()
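A sketch of how the generated GraphQL text could be wrapped into an ins_query packet, mirroring the packet shape shown in _query_for_quote() above; wrapping _query_for_init() this way is an assumption made for illustration, not necessarily how tqsdk itself uses it.

# Packet shape copied from _query_for_quote(); applying it to
# _query_for_init() is an assumption made for this sketch.
init_pack = {
    "aid": "ins_query",
    "query_id": _generate_uuid(prefix="PYSDK_quote_"),
    "query": _query_for_init(),
}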
39
48
#!usr/bin/env python3 # -*- coding:utf-8 -*- __author__ = 'yanqiong' import random import secrets from bisect import bisect_right from sgqlc.operation import Operation from pandas.core.internals import BlockManager from tqsdk.ins_schema import ins_schema, _add_all_frags RD = random.Random(secrets.randbits(128)) # 初始化随机数引擎,使用随机数作为seed,防止用户同时拉起多个策略,产生同样的 seed def _generate_uuid(prefix=''): return f"{prefix + '_' if prefix else ''}{RD.getrandbits(128):032x}" def _query_for_quote(symbol): """ 返回请求某个合约的合约信息的 query_pack 调用次函数应该全部都是sdk的代码主动请求合约信息 用户请求合约信息一定是 PYSDK_api 开头的请求,因为用户请求的合约信息在回测时带有 timestamp 参数,是不应该调用此函数的 """ symbol_list = symbol if isinstance(symbol, list) else [symbol] op = Operation(ins_schema.rootQuery) query = op.multi_symbol_info(instrument_id=symbol_list) _add_all_frags(query) return { "aid": "ins_query", "query_id": _generate_uuid(prefix='PYSDK_quote_'), "query": op.__to_graphql__() } def _query_for_init(): """ 返回某些类型合约的 query todo: 为了兼容旧版提供给用户的 api._data["quote"].items() 类似用法,应该限制交易所 ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"] """ op = Operation(ins_schema.rootQuery) query = op.multi_symbol_info(class_=["FUTURE", "INDEX", "OPTION", "COMBINE", "CONT"], exchange_id=["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]) _add_all_frags(query) return op.__to_graphql__() night_trading_table = { "DCE.a": ["21:00:00", "23:00:00"], "DCE.b": ["21:00:00", "23:00:00"], "DCE.c": ["21:00:00", "23:00:00"], "DCE.cs": ["21:00:00", "23:00:00"], "DCE.m": ["21:00:00", "23:00:00"], "DCE.y": ["21:00:00", "23:00:00"], "DCE.p": ["21:00:00", "23:00:00"], "DCE.l": ["21:00:00", "23:00:00"], "DCE.v": ["21:00:00", "23:00:00"], "DCE.pp": ["21:00:00", "23:00:00"], "DCE.j": ["21:00:00", "23:00:00"], "DCE.jm": ["21:00:00", "23:00:00"], "DCE.i": ["21:00:00", "23:00:00"], "DCE.eg": ["21:00:00", "23:00:00"], "DCE.eb": ["21:00:00", "23:00:00"], "DCE.rr": ["21:00:00", "23:00:00"], "DCE.pg": ["21:00:00", "23:00:00"], "CZCE.CF": ["21:00:00", "23:00:00"], "CZCE.CY": ["21:00:00", "23:00:00"], "CZCE.SA": ["21:00:00", "23:00:00"], "CZCE.SR": ["21:00:00", "23:00:00"], "CZCE.TA": ["21:00:00", "23:00:00"], "CZCE.OI": ["21:00:00", "23:00:00"], "CZCE.MA": ["21:00:00", "23:00:00"], "CZCE.FG": ["21:00:00", "23:00:00"], "CZCE.RM": ["21:00:00", "23:00:00"], "CZCE.ZC": ["21:00:00", "23:00:00"], "CZCE.TC": ["21:00:00", "23:00:00"], "SHFE.rb": ["21:00:00", "23:00:00"], "SHFE.hc": ["21:00:00", "23:00:00"], "SHFE.fu": ["21:00:00", "23:00:00"], "SHFE.bu": ["21:00:00", "23:00:00"], "SHFE.ru": ["21:00:00", "23:00:00"], "SHFE.sp": ["21:00:00", "23:00:00"], "INE.nr": ["21:00:00", "23:00:00"], "SHFE.cu": ["21:00:00", "25:00:00"], "SHFE.al": ["21:00:00", "25:00:00"], "SHFE.zn": ["21:00:00", "25:00:00"], "SHFE.pb": ["21:00:00", "25:00:00"], "SHFE.ni": ["21:00:00", "25:00:00"], "SHFE.sn": ["21:00:00", "25:00:00"], "SHFE.ss": ["21:00:00", "25:00:00"], "SHFE.au": ["21:00:00", "26:30:00"], "SHFE.ag": ["21:00:00", "26:30:00"], "INE.sc": ["21:00:00", "26:30:00"], } def _quotes_add_night(quotes): """为 quotes 中应该有夜盘但是市价合约文件中没有夜盘的品种,添加夜盘时间""" for symbol in quotes: product_id = quotes[symbol].get("product_id") if quotes[symbol].get("trading_time") and product_id: key = f"{quotes[symbol].get('exchange_id')}.{product_id}" if key in night_trading_table and (not quotes[symbol]["trading_time"].get("night")): quotes[symbol]["trading_time"]["night"] = [night_trading_table[key]] def _bisect_value(a, x, priority="right"): """ 返回 bisect_right() 取得下标对应的值,当插入点距离前后元素距离相等,priority 表示优先返回右边的值还是左边的值 a: 必须是已经排序好(升序排列)的 list bisect_right : Return the index where to insert 
item x in list a, assuming a is sorted. """ assert priority in ['left', 'right'] insert_index = bisect_right(a, x) if 0 < insert_index < len(a): left_dis = x - a[insert_index - 1] right_dis = a[insert_index] - x if left_dis == right_dis: mid_index = insert_index - 1 if priority == "left" else insert_index elif left_dis < right_dis: mid_index = insert_index - 1 else: mid_index = insert_index else: assert insert_index == 0 or insert_index == len(a) mid_index = 0 if insert_index == 0 else (len(a) - 1) return a[mid_index] class BlockManagerUnconsolidated(BlockManager): """mock BlockManager for unconsolidated, 不会因为自动合并同类型的 blocks 而导致 k 线数据不更新""" def __init__(self, *args, **kwargs): BlockManager.__init__(self, *args, **kwargs) self._is_consolidated = False self._known_consolidated = False def _consolidate_inplace(self): pass
solve_for_target_return
Solve for the weights of the minimum variance portfolio which has a specific targeted return. Constraints: sum of weights = 1, weights bound by [0, 0.2], portfolio return = target return, Returns the weights and the jacobian used to generate the solution.
''' A collection of functions to perform portfolio analysis.

    Max Gosselin, 2019
'''
import numpy as np
import pandas as pd
from scipy import optimize


def portfolio_metrics(weights, avg_xs_returns, covariance_matrix):
    ''' Compute basic portfolio metrics: return, stdv, sharpe ratio '''

    portfolio_return = np.sum(weights * avg_xs_returns)
    portfolio_stdv = np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
    portfolio_sharpe = portfolio_return / portfolio_stdv

    tickers = covariance_matrix.columns
    metrics = {
        'return': portfolio_return,
        'stdv': portfolio_stdv,
        'sharpe': portfolio_sharpe,
        'weights': weights
    }
    metrics.update(dict([(ticker, weight) for ticker, weight in zip(tickers, weights)]).items())

    return metrics


def simulate_portfolios(iters, xs_stats, covariance_matrix):
    ''' What we want here is to randomly generate portfolios that will sit
        inside the efficient frontier for illustrative purposes '''

    # Set up an empty array to store our generated portfolios
    simulations = []

    while iters > 0:
        weights = np.random.random(len(xs_stats.columns))
        weights /= np.sum(weights)
        simulations.append(portfolio_metrics(weights, xs_stats.loc['Avg'], covariance_matrix))
        iters -= 1

    return simulations


def solve_minvar(xs_avg, covariance_matrix):
    ''' Solve for the weights of the minimum variance portfolio

        Constraints: sum of weights = 1,
            weights bound by [0, 0.2],

        Returns the weights and the jacobian used to generate the solution.
    '''
    def __minvar(weights, xs_avg, covariance_matrix):
        ''' Anonymous function to compute stdv '''
        return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))

    p_size = len(xs_avg)
    args = (xs_avg, covariance_matrix)
    constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
    bounds = [(0, 0.2)] * p_size

    minimized_weights = optimize.minimize(__minvar, np.zeros(p_size), args=args,
                                          method='SLSQP', bounds=bounds,
                                          constraints=constraints, options={'maxiter': 1000})

    return minimized_weights


def solve_maxsharpe(xs_avg, covariance_matrix):
    ''' Solve for the weights of the maximum Sharpe ratio portfolio

        Constraints: sum of weights = 1,
            weights bound by [0, 0.2],

        Returns the weights and the jacobian used to generate the solution.
    '''
    def __max_by_min_sharpe(weights, xs_avg, covariance_matrix):
        ''' Anonymous function to compute sharpe ratio; note that since scipy only
            minimizes we go negative. '''
        pm = portfolio_metrics(weights, xs_avg, covariance_matrix)
        return -pm['return'] / pm['stdv']

    p_size = len(xs_avg)
    args = (xs_avg, covariance_matrix)
    constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
    bounds = [(0, 0.2)] * p_size

    minimized_weights = optimize.minimize(__max_by_min_sharpe, ((1/p_size) * np.ones(p_size)), args=args,
                                          method='SLSQP', bounds=bounds,
                                          constraints=constraints, options={'maxiter': 1000})

    return minimized_weights


# MASKED: solve_for_target_return function (lines 103-135)


def generate_efficient_frontier(targets, xs_avg, covariance_matrix):
    portfolios = []
    for target in targets:
        p_weights = solve_for_target_return(xs_avg, covariance_matrix, target)
        portfolios.append(portfolio_metrics(p_weights['x'], xs_avg, covariance_matrix))
    return portfolios
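As a quick sanity check of portfolio_metrics, a toy example with made-up excess returns and a made-up covariance matrix (it assumes the definitions above are importable):

import numpy as np
import pandas as pd

avg_xs_returns = pd.Series([0.05, 0.08, 0.03], index=['A', 'B', 'C'])
covariance_matrix = pd.DataFrame(
    [[0.04, 0.01, 0.00],
     [0.01, 0.09, 0.02],
     [0.00, 0.02, 0.02]],
    index=['A', 'B', 'C'], columns=['A', 'B', 'C'])

weights = np.array([0.4, 0.3, 0.3])
m = portfolio_metrics(weights, avg_xs_returns, covariance_matrix)
print(m['return'], m['stdv'], m['sharpe'])  # plus one entry per ticker weight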
def solve_for_target_return(xs_avg, covariance_matrix, target): ''' Solve for the weights of the minimum variance portfolio which has a specific targeted return. Constraints: sum of weights = 1, weights bound by [0, 0.2], portfolio return = target return, Returns the weights and the jacobian used to generate the solution. ''' def __minvar(weights, xs_avg, covariance_matrix): ''' Anonymous function to compute stdv ''' return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix))) def __match_target(weights): ''' Anonymous function to check equality with the target return ''' return np.sum(weights * xs_avg) p_size = len(xs_avg) args = (xs_avg, covariance_matrix) constraints = [ {'type': 'eq', 'fun': lambda x: np.sum(x) - 1}, {'type': 'eq', 'fun': lambda x: __match_target(x) - target}, ] bounds = [(0, 0.2)] * p_size minimized_weights = optimize.minimize(__minvar, ((1/p_size) * np.ones(p_size)), args=args, method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter':1000}) return minimized_weights
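A minimal usage sketch for solve_for_target_return, assuming a toy diagonal covariance matrix (all numbers made up; the target must be reachable with every weight held inside [0, 0.2]):

import numpy as np
import pandas as pd

xs_avg = pd.Series([0.05, 0.08, 0.03, 0.06, 0.04, 0.07], index=list('ABCDEF'))
cov = pd.DataFrame(np.diag([0.04, 0.09, 0.02, 0.05, 0.03, 0.06]),
                   index=xs_avg.index, columns=xs_avg.index)

res = solve_for_target_return(xs_avg, cov, target=0.055)
print(res.x)           # optimal weights, each within [0, 0.2]
print(res.x.sum())     # ~1.0, by the first equality constraint
print(res.x @ xs_avg)  # ~0.055, by the target-return constraint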
103
135
''' A collection of functions to perform portfolio analysis.

    Max Gosselin, 2019
'''
import numpy as np
import pandas as pd
from scipy import optimize


def portfolio_metrics(weights, avg_xs_returns, covariance_matrix):
    ''' Compute basic portfolio metrics: return, stdv, sharpe ratio '''

    portfolio_return = np.sum(weights * avg_xs_returns)
    portfolio_stdv = np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
    portfolio_sharpe = portfolio_return / portfolio_stdv

    tickers = covariance_matrix.columns
    metrics = {
        'return': portfolio_return,
        'stdv': portfolio_stdv,
        'sharpe': portfolio_sharpe,
        'weights': weights
    }
    metrics.update(dict([(ticker, weight) for ticker, weight in zip(tickers, weights)]).items())

    return metrics


def simulate_portfolios(iters, xs_stats, covariance_matrix):
    ''' What we want here is to randomly generate portfolios that will sit
        inside the efficient frontier for illustrative purposes '''

    # Set up an empty array to store our generated portfolios
    simulations = []

    while iters > 0:
        weights = np.random.random(len(xs_stats.columns))
        weights /= np.sum(weights)
        simulations.append(portfolio_metrics(weights, xs_stats.loc['Avg'], covariance_matrix))
        iters -= 1

    return simulations


def solve_minvar(xs_avg, covariance_matrix):
    ''' Solve for the weights of the minimum variance portfolio

        Constraints: sum of weights = 1,
            weights bound by [0, 0.2],

        Returns the weights and the jacobian used to generate the solution.
    '''
    def __minvar(weights, xs_avg, covariance_matrix):
        ''' Anonymous function to compute stdv '''
        return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))

    p_size = len(xs_avg)
    args = (xs_avg, covariance_matrix)
    constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
    bounds = [(0, 0.2)] * p_size

    minimized_weights = optimize.minimize(__minvar, np.zeros(p_size), args=args,
                                          method='SLSQP', bounds=bounds,
                                          constraints=constraints, options={'maxiter': 1000})

    return minimized_weights


def solve_maxsharpe(xs_avg, covariance_matrix):
    ''' Solve for the weights of the maximum Sharpe ratio portfolio

        Constraints: sum of weights = 1,
            weights bound by [0, 0.2],

        Returns the weights and the jacobian used to generate the solution.
    '''
    def __max_by_min_sharpe(weights, xs_avg, covariance_matrix):
        ''' Anonymous function to compute sharpe ratio; note that since scipy only
            minimizes we go negative. '''
        pm = portfolio_metrics(weights, xs_avg, covariance_matrix)
        return -pm['return'] / pm['stdv']

    p_size = len(xs_avg)
    args = (xs_avg, covariance_matrix)
    constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
    bounds = [(0, 0.2)] * p_size

    minimized_weights = optimize.minimize(__max_by_min_sharpe, ((1/p_size) * np.ones(p_size)), args=args,
                                          method='SLSQP', bounds=bounds,
                                          constraints=constraints, options={'maxiter': 1000})

    return minimized_weights


def solve_for_target_return(xs_avg, covariance_matrix, target):
    ''' Solve for the weights of the minimum variance portfolio which has a
        specific targeted return.

        Constraints: sum of weights = 1,
            weights bound by [0, 0.2],
            portfolio return = target return,

        Returns the weights and the jacobian used to generate the solution.
    '''
    def __minvar(weights, xs_avg, covariance_matrix):
        ''' Anonymous function to compute stdv '''
        return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))

    def __match_target(weights):
        ''' Anonymous function to check equality with the target return '''
        return np.sum(weights * xs_avg)

    p_size = len(xs_avg)
    args = (xs_avg, covariance_matrix)
    constraints = [
        {'type': 'eq', 'fun': lambda x: np.sum(x) - 1},
        {'type': 'eq', 'fun': lambda x: __match_target(x) - target},
    ]
    bounds = [(0, 0.2)] * p_size

    minimized_weights = optimize.minimize(__minvar, ((1/p_size) * np.ones(p_size)), args=args,
                                          method='SLSQP', bounds=bounds,
                                          constraints=constraints, options={'maxiter': 1000})

    return minimized_weights


def generate_efficient_frontier(targets, xs_avg, covariance_matrix):
    portfolios = []
    for target in targets:
        p_weights = solve_for_target_return(xs_avg, covariance_matrix, target)
        portfolios.append(portfolio_metrics(p_weights['x'], xs_avg, covariance_matrix))
    return portfolios
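Sweeping targets through solve_for_target_return traces the efficient frontier; a short sketch reusing the toy xs_avg and cov from the example above:

import numpy as np

targets = np.linspace(0.05, 0.06, 5)  # must stay within the returns reachable under the bounds
frontier = generate_efficient_frontier(targets, xs_avg, cov)
for p in frontier:
    print(round(p['return'], 4), round(p['stdv'], 4))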
_consistency_check
Required definitions:
-- WHITESPACE (Default done automatically) => Assert.
-- NEWLINE (Default done automatically) => Assert.

Inadmissible 'eat-into':
-- SUPPRESSOR shall not eat into [NEWLINE]
-- NEWLINE shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR]
-- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
-- BADSPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].

No common lexemes:
-- WHITESPACE and BADSPACE may not have common lexemes.

Outrun:
-- NEWLINE may not start with SUSPEND and vice versa
-- NEWLINE may not start with SUPPRESSOR and vice versa
-- SUPPRESSOR may not start with SUSPEND and vice versa
-- WHITESPACE shall not outrun BADSPACE, but the contrary is ok.
   (BADSPACE may outrun WHITESPACE, e.g. a lexeme with 'tab' after whitespace.)
# Project Quex (http://quex.sourceforge.net); License: MIT; # (C) 2005-2020 Frank-Rene Schaefer; #_______________________________________________________________________________ from quex.input.setup import NotificationDB from quex.input.regular_expression.pattern import Pattern_Prep import quex.input.regular_expression.core as regular_expression from quex.input.code.base import SourceRef, \ SourceRef_DEFAULT, \ SourceRefObject from quex.engine.state_machine.core import DFA import quex.engine.state_machine.construction.sequentialize as sequentialize import quex.engine.state_machine.construction.repeat as repeat import quex.engine.state_machine.algebra.difference as difference import quex.engine.state_machine.algebra.intersection as intersection import quex.engine.state_machine.algorithm.beautifier as beautifier import quex.engine.state_machine.check.swallow as swallow import quex.engine.state_machine.check.outrun as outrun import quex.engine.state_machine.check.identity as identity import quex.engine.state_machine.check.tail as tail from quex.engine.misc.tools import typed from quex.engine.misc.interval_handling import NumberSet from quex.engine.counter import IndentationCount_Pre, \ cc_type_name_db, \ cc_type_db from quex.engine.counter_builder import CountActionMap_Builder import quex.engine.misc.error as error import quex.engine.misc.error_check as error_check from quex.engine.misc.file_in import check, \ check_or_die, \ skip_whitespace, \ read_identifier, \ read_integer from quex.constants import E_CharacterCountType from quex.blackboard import setup as Setup def parse_CountActionMap(fh): return _base_parse(fh, CountActionMapFromParser_Builder(fh)) def parse_IndentationSetup(fh): return _base_parse(fh, IndentationSetup_Builder(fh)) def _base_parse(fh, builder, IndentationSetupF=False): """Parses pattern definitions of the form: [ \t] => grid 4; [:intersection([:alpha:], [\X064-\X066]):] => space 1; In other words the right hand side *must* be a character set. ADAPTS: result to contain parsing information. """ # NOTE: Catching of EOF happens in caller: parse_section(...) # while 1 + 1 == 2: skip_whitespace(fh) if check(fh, ">"): break # A regular expression state machine pattern, identifier, sr = _parse_definition_head(fh, builder.identifier_list) if pattern is None and not builder.keyword_else_f: error.log("Keyword '\\else' cannot be used in indentation setup.", fh) # '_parse_definition_head()' ensures that only identifiers mentioned in # 'result' are accepted. if builder.requires_count(): count = _read_value_specifier(fh, identifier, 1) builder.specify(identifier, pattern, count, sr) else: builder.specify(identifier, pattern, sr) if not check(fh, ";"): error.log("Missing ';' after '%s' specification." % identifier, fh) return builder.finalize() class CharacterSetVsAction_BuilderBase: def __init__(self, IdentifierList, KeywordElseAdmissibleF): self.identifier_list = IdentifierList self.keyword_else_f = KeywordElseAdmissibleF class CountActionMapFromParser_Builder(CharacterSetVsAction_BuilderBase): """Line/column number count specification. ___________________________________________________________________________ The main result of the parsing the the Base's .count_command_map which is an instance of CountActionMap_Builder. 
____________________________________________________________________________ """ @typed(sr=SourceRef) def __init__(self, fh): self.sr = SourceRef.from_FileHandle(fh) self.__fh = fh self._ca_map_builder = CountActionMap_Builder() CharacterSetVsAction_BuilderBase.__init__(self, ("columns", "grid", "lines"), KeywordElseAdmissibleF=True) def finalize(self): # Finalize / Produce 'LineColumnCount' object. # ca_map = self._ca_map_builder.finalize( Setup.buffer_encoding.source_set.minimum(), Setup.buffer_encoding.source_set.least_greater_bound(), self.sr) _check_grid_values_integer_multiples(ca_map) check_defined(ca_map, self.sr, E_CharacterCountType.LINE) return ca_map def requires_count(self): return True @typed(sr=SourceRef, Identifier=(str,str)) def specify(self, Identifier, Pattern, Count, sr): if Pattern is None: self._ca_map_builder.define_else(cc_type_db[Identifier], Count, sr) else: trigger_set = _extract_trigger_set(sr, Identifier, Pattern) self._ca_map_builder.add(trigger_set, cc_type_db[Identifier], Count, sr) class IndentationSetup_Builder(CharacterSetVsAction_BuilderBase): """Indentation counter specification. ____________________________________________________________________________ The base's .count_command_map contains information about how to count the space at the beginning of the line. The count until the first non-whitespace is the 'indentation'. +bad: The spec contains information about what characters are not supposed to appear in indentation (bad characters). Depending on the philosophical basis, some might consider 'space' as evil, others consider 'tab' as evil. +newline: A detailed state machine can be defined for 'newline'. This might be '\n|(\r\n)' or more complex things. +suppressor: A newline might be suppressed by '\' for example. For that, it might be specified as 'newline suppressor'. ____________________________________________________________________________ """ @typed(sr=SourceRef) def __init__(self, fh): self.__fh = fh self.sm_whitespace = SourceRefObject("whitespace", None) self.sm_badspace = SourceRefObject("bad", None) self.sm_newline = SourceRefObject("newline", None) self.sm_newline_suppressor = SourceRefObject("suppressor", None) self.sm_suspend_list = [] if fh == -1: self.sr = SourceRef_DEFAULT else: self.sr = SourceRef.from_FileHandle(self.__fh) CharacterSetVsAction_BuilderBase.__init__(self, ("whitespace", "suspend", "newline", "suppressor", "bad"), KeywordElseAdmissibleF=False) def finalize(self): # Finalize / Produce 'IndentationCount' object. # if self.sm_whitespace.get() is None: self.sm_whitespace.set(self.__sm_whitespace_default(), SourceRef_DEFAULT) if self.sm_newline.get() is None: self.sm_newline.set(self.__sm_newline_default(), SourceRef_DEFAULT) # -- consistency self._consistency_check() # Transform 'SourceRefObject' into 'Pattern_Prep' objects # (TODO: Why not use it in the first place?) 
def get_pattern(SRO): if SRO is None or SRO.get() is None: return None return Pattern_Prep(SRO.get(), PatternString="<indentation %s>" % SRO.name, Sr=SRO.sr) pattern_suspend_list = [ get_pattern(sro) for sro in self.sm_suspend_list ] pattern_suspend_list = [ x for x in pattern_suspend_list if x is not None ] if self.sm_newline_suppressor.set_f(): sm_suppressed_newline = sequentialize.do([self.sm_newline_suppressor.get(), self.sm_newline.get()]) sm_suppressed_newline = beautifier.do(sm_suppressed_newline) pattern_suppressed_newline = Pattern_Prep(sm_suppressed_newline, PatternString="<indentation suppressed newline>", Sr=self.sm_newline_suppressor.sr) else: pattern_suppressed_newline = None return IndentationCount_Pre(self.sr, get_pattern(self.sm_whitespace), get_pattern(self.sm_badspace), get_pattern(self.sm_newline), pattern_suppressed_newline, pattern_suspend_list) def requires_count(self): return False def specify(self, identifier, pattern, sr): sm = pattern.extract_sm() if identifier == "whitespace": self.__specify(self.sm_whitespace, sm, sr) elif identifier == "bad": self.__specify(self.sm_badspace, sm, sr) elif identifier == "newline": self.__specify(self.sm_newline, sm, sr) elif identifier == "suppressor": self.__specify(self.sm_newline_suppressor, sm , sr) elif identifier == "suspend": self.__specify_suspend(sm, sr) else: return False return True @typed(sr=SourceRef) def __specify(self, member_ref, Sm, sr): assert Sm is not None _error_if_defined_before(member_ref, sr) if not Sm.is_DFA_compliant(): Sm = beautifier.do(Sm) member_ref.set(Sm, sr) @typed(sr=SourceRef) def __specify_suspend(self, Sm, sr): for before in self.sm_suspend_list: if not identity.do(before.get(), Sm): continue error.log("'suspend' has been defined before;", sr, DontExitF=True) error.log("at this place.", before.sr) sm_suspend = SourceRefObject("suspend", None) self.__specify(sm_suspend, Sm, sr) self.sm_suspend_list.append(sm_suspend) def __sm_newline_default(self): """Default newline: '(\n)|(\r\n)' """ sm = DFA.from_character_set(NumberSet(ord('\n'))) if Setup.dos_carriage_return_newline_f: sm.add_transition_sequence(sm.init_state_index, [ord('\r'), ord('\n')]) return sm def __sm_whitespace_default(self): """Try to define default whitespace ' ' or '\t' if their positions are not yet occupied in the count_command_map. """ sm_whitespace = DFA.from_character_set(NumberSet.from_integer_list([ord(' '), ord('\t')])) sm_whitespace = beautifier.do(repeat.do(sm_whitespace, 1)) if self.sm_badspace.get() is not None: sm_whitespace = difference.do(sm_whitespace, self.sm_badspace.get()) if sm_whitespace.is_Empty() \ or outrun.do(self.sm_badspace.get(), sm_whitespace): error.log("Cannot define default 'whitespace' in the frame of the given\n" "definition of 'bad'.", self.sm_badspace.sr) return sm_whitespace # MASKED: _consistency_check function (lines 258-342) def _parse_definition_head(fh, IdentifierList): if check(fh, "\\default"): error.log("'\\default' has been replaced by keyword '\\else' since quex 0.64.9!", fh) elif check(fh, "\\else"): pattern = None else: pattern = regular_expression.parse(fh, AllowPreContextF=False, AllowPostContextF=False) skip_whitespace(fh) check_or_die(fh, "=>", " after character set definition.") skip_whitespace(fh) identifier = read_identifier(fh, OnMissingStr="Missing identifier following '=>'.") error.verify_word_in_list(identifier, IdentifierList, "Unrecognized specifier '%s'." 
% identifier, fh) skip_whitespace(fh) return pattern, identifier, SourceRef.from_FileHandle(fh) def _read_value_specifier(fh, Keyword, Default=None): skip_whitespace(fh) value = read_integer(fh) if value is not None: return value # not a number received, is it an identifier? variable = read_identifier(fh) if variable: return variable elif Default is not None: return Default error.log("Missing integer or variable name after keyword '%s'." % Keyword, fh) __CountActionMap_DEFAULT = None def LineColumnCount_Default(): global __CountActionMap_DEFAULT if __CountActionMap_DEFAULT is None: builder = CountActionMap_Builder() builder.add(NumberSet(ord('\n')), E_CharacterCountType.LINE, 1, SourceRef_DEFAULT) builder.add(NumberSet(ord('\t')), E_CharacterCountType.GRID, 4, SourceRef_DEFAULT) builder.define_else(E_CharacterCountType.COLUMN, 1, SourceRef_DEFAULT) # Define: "\else" __CountActionMap_DEFAULT = builder.finalize( Setup.buffer_encoding.source_set.minimum(), Setup.buffer_encoding.source_set.least_greater_bound(), # Apply: "\else" SourceRef_DEFAULT) return __CountActionMap_DEFAULT def _error_if_defined_before(Before, sr): if not Before.set_f(): return error.log("'%s' has been defined before;" % Before.name, sr, DontExitF=True) error.log("at this place.", Before.sr) def _extract_trigger_set(sr, Keyword, Pattern): if Pattern is None: return None elif isinstance(Pattern, NumberSet): return Pattern def check_can_be_matched_by_single_character(SM): bad_f = False init_state = SM.get_init_state() if SM.get_init_state().is_acceptance(): bad_f = True elif len(SM.states) != 2: bad_f = True # Init state MUST transit to second state. Second state MUST not have any transitions elif len(init_state.target_map.get_target_state_index_list()) != 1: bad_f = True else: tmp = set(SM.states.keys()) tmp.remove(SM.init_state_index) other_state_index = next(iter(tmp)) if len(SM.states[other_state_index].target_map.get_target_state_index_list()) != 0: bad_f = True if bad_f: error.log("For '%s' only patterns are addmissible which\n" % Keyword + \ "can be matched by a single character, e.g. \" \" or [a-z].", sr) sm = Pattern.extract_sm() check_can_be_matched_by_single_character(sm) transition_map = sm.get_init_state().target_map.get_map() assert len(transition_map) == 1 return list(transition_map.values())[0] def _check_grid_values_integer_multiples(CaMap): """If there are no spaces and the grid is on a homogeneous scale, => then the grid can be transformed into 'easy-to-compute' spaces. """ grid_value_list = [] min_info = None for character_set, info in CaMap: if info.cc_type == E_CharacterCountType.COLUMN: return elif info.cc_type != E_CharacterCountType.GRID: continue elif type(info.value) in (str, str): # If there is one single 'variable' grid value, # then no assumptions can be made. return grid_value_list.append(info.value) if min_info is None or info.value < min_info.value: min_info = info if min_info is None: return # Are all grid values a multiple of the minimum? if all(x % min_info.value == 0 for x in grid_value_list): error.warning("Setup does not contain spaces, only grids (tabulators). All grid\n" \ "widths are multiples of %i. The grid setup %s is equivalent to\n" \ % (min_info.value, repr(sorted(grid_value_list))[1:-1]) + \ "a setup with space counts %s. Space counts are faster to compute.\n" \ % repr([x / min_info.value for x in sorted(grid_value_list)])[1:-1], min_info.sr) return def check_defined(CaMap, SourceReference, CCT): """Checks whether the character counter type has been defined in the map. 
THROWS: Error in case that is has not been defined. """ for character_set, info in CaMap: if info.cc_type == CCT: return error.warning("Setup does not define '%s'." % cc_type_name_db[CCT], SourceReference, SuppressCode=NotificationDB.warning_counter_setup_without_newline)
def _consistency_check(self):
    """
    Required definitions:
    -- WHITESPACE (Default done automatically) => Assert.
    -- NEWLINE (Default done automatically) => Assert.

    Inadmissible 'eat-into':
    -- SUPPRESSOR shall not eat into [NEWLINE]
    -- NEWLINE shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR]
    -- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
    -- BADSPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].

    No common lexemes:
    -- WHITESPACE and BADSPACE may not have common lexemes.

    Outrun:
    -- NEWLINE may not start with SUSPEND and vice versa
    -- NEWLINE may not start with SUPPRESSOR and vice versa
    -- SUPPRESSOR may not start with SUSPEND and vice versa
    -- WHITESPACE shall not outrun BADSPACE, but the contrary is ok.
       (BADSPACE may outrun WHITESPACE, e.g. a lexeme with 'tab' after whitespace.)
    """
    # (1) Required definitions _____________________________________________
    assert self.sm_whitespace.set_f()
    assert self.sm_newline.set_f()

    whitespace   = self.sm_whitespace
    newline      = self.sm_newline
    badspace     = self.sm_badspace
    suppressor   = self.sm_newline_suppressor
    suspend_list = self.sm_suspend_list

    # (2) Inadmissible 'eat-into' __________________________________________
    #
    cmp_list = [
        (newline,    badspace), (newline,    whitespace), (newline, suppressor),
        (suppressor, newline),
        (whitespace, newline),  (whitespace, suppressor),
        (badspace,   newline),  (badspace,   suppressor),
    ] \
    + [ (whitespace, x) for x in suspend_list ] \
    + [ (newline,    x) for x in suspend_list ] \
    + [ (badspace,   x) for x in suspend_list ]

    def _error(FormatStr, Sro0, Sro1):
        error.log(FormatStr % (Sro0.name, Sro1.name), Sro0.sr, DontExitF=True)
        error.log("'%s' defined here." % Sro1.name, Sro1.sr)

    def _iterate(SroPairList):
        # Iterate over the given pair list (not the enclosing 'cmp_list'),
        # skipping pairs where either state machine is undefined.
        for first_sro, second_sro in SroPairList:
            first, second = first_sro.get(), second_sro.get()
            if first is None or second is None: continue
            yield first_sro, first, second_sro, second

    for first_sro, first, second_sro, second in _iterate(cmp_list):
        if swallow.ending_A_beginning_B(first, second):
            _error("'%s' may eat into beginning of '%s'.", first_sro, second_sro)
        elif swallow.inside_A_match_B(first, second):
            _error("'%s' may swallow something matched by '%s'.", first_sro, second_sro)

    for sm_suspend in self.sm_suspend_list:
        only_common_f, \
        common_f      = tail.do(self.sm_newline.get(), sm_suspend.get())
        error_check.tail(only_common_f, common_f,
                         "indentation handler's newline", self.sm_newline.sr,
                         "suspend", sm_suspend.sr)

    # (3) Inadmissible common lexemes ______________________________________
    #
    if badspace.get() and not intersection.do([badspace.get(), whitespace.get()]).is_Empty():
        _error("'%s' and '%s' match on common lexemes.", whitespace, badspace)

    # (4) Inadmissible outruns _____________________________________________
    #
    cmp_list = [ (newline, suppressor), (suppressor, newline), (whitespace, badspace) ]
    for x in suspend_list:
        cmp_list.extend([ (newline, x), (x, newline), (suppressor, x), (x, suppressor) ])

    for first_sro, first, second_sro, second in _iterate(cmp_list):
        if outrun.do(second, first):
            _error("'%s' may outrun '%s'.", first_sro, second_sro)
258
342
# Project Quex (http://quex.sourceforge.net); License: MIT; # (C) 2005-2020 Frank-Rene Schaefer; #_______________________________________________________________________________ from quex.input.setup import NotificationDB from quex.input.regular_expression.pattern import Pattern_Prep import quex.input.regular_expression.core as regular_expression from quex.input.code.base import SourceRef, \ SourceRef_DEFAULT, \ SourceRefObject from quex.engine.state_machine.core import DFA import quex.engine.state_machine.construction.sequentialize as sequentialize import quex.engine.state_machine.construction.repeat as repeat import quex.engine.state_machine.algebra.difference as difference import quex.engine.state_machine.algebra.intersection as intersection import quex.engine.state_machine.algorithm.beautifier as beautifier import quex.engine.state_machine.check.swallow as swallow import quex.engine.state_machine.check.outrun as outrun import quex.engine.state_machine.check.identity as identity import quex.engine.state_machine.check.tail as tail from quex.engine.misc.tools import typed from quex.engine.misc.interval_handling import NumberSet from quex.engine.counter import IndentationCount_Pre, \ cc_type_name_db, \ cc_type_db from quex.engine.counter_builder import CountActionMap_Builder import quex.engine.misc.error as error import quex.engine.misc.error_check as error_check from quex.engine.misc.file_in import check, \ check_or_die, \ skip_whitespace, \ read_identifier, \ read_integer from quex.constants import E_CharacterCountType from quex.blackboard import setup as Setup def parse_CountActionMap(fh): return _base_parse(fh, CountActionMapFromParser_Builder(fh)) def parse_IndentationSetup(fh): return _base_parse(fh, IndentationSetup_Builder(fh)) def _base_parse(fh, builder, IndentationSetupF=False): """Parses pattern definitions of the form: [ \t] => grid 4; [:intersection([:alpha:], [\X064-\X066]):] => space 1; In other words the right hand side *must* be a character set. ADAPTS: result to contain parsing information. """ # NOTE: Catching of EOF happens in caller: parse_section(...) # while 1 + 1 == 2: skip_whitespace(fh) if check(fh, ">"): break # A regular expression state machine pattern, identifier, sr = _parse_definition_head(fh, builder.identifier_list) if pattern is None and not builder.keyword_else_f: error.log("Keyword '\\else' cannot be used in indentation setup.", fh) # '_parse_definition_head()' ensures that only identifiers mentioned in # 'result' are accepted. if builder.requires_count(): count = _read_value_specifier(fh, identifier, 1) builder.specify(identifier, pattern, count, sr) else: builder.specify(identifier, pattern, sr) if not check(fh, ";"): error.log("Missing ';' after '%s' specification." % identifier, fh) return builder.finalize() class CharacterSetVsAction_BuilderBase: def __init__(self, IdentifierList, KeywordElseAdmissibleF): self.identifier_list = IdentifierList self.keyword_else_f = KeywordElseAdmissibleF class CountActionMapFromParser_Builder(CharacterSetVsAction_BuilderBase): """Line/column number count specification. ___________________________________________________________________________ The main result of the parsing the the Base's .count_command_map which is an instance of CountActionMap_Builder. 
____________________________________________________________________________ """ @typed(sr=SourceRef) def __init__(self, fh): self.sr = SourceRef.from_FileHandle(fh) self.__fh = fh self._ca_map_builder = CountActionMap_Builder() CharacterSetVsAction_BuilderBase.__init__(self, ("columns", "grid", "lines"), KeywordElseAdmissibleF=True) def finalize(self): # Finalize / Produce 'LineColumnCount' object. # ca_map = self._ca_map_builder.finalize( Setup.buffer_encoding.source_set.minimum(), Setup.buffer_encoding.source_set.least_greater_bound(), self.sr) _check_grid_values_integer_multiples(ca_map) check_defined(ca_map, self.sr, E_CharacterCountType.LINE) return ca_map def requires_count(self): return True @typed(sr=SourceRef, Identifier=(str,str)) def specify(self, Identifier, Pattern, Count, sr): if Pattern is None: self._ca_map_builder.define_else(cc_type_db[Identifier], Count, sr) else: trigger_set = _extract_trigger_set(sr, Identifier, Pattern) self._ca_map_builder.add(trigger_set, cc_type_db[Identifier], Count, sr) class IndentationSetup_Builder(CharacterSetVsAction_BuilderBase): """Indentation counter specification. ____________________________________________________________________________ The base's .count_command_map contains information about how to count the space at the beginning of the line. The count until the first non-whitespace is the 'indentation'. +bad: The spec contains information about what characters are not supposed to appear in indentation (bad characters). Depending on the philosophical basis, some might consider 'space' as evil, others consider 'tab' as evil. +newline: A detailed state machine can be defined for 'newline'. This might be '\n|(\r\n)' or more complex things. +suppressor: A newline might be suppressed by '\' for example. For that, it might be specified as 'newline suppressor'. ____________________________________________________________________________ """ @typed(sr=SourceRef) def __init__(self, fh): self.__fh = fh self.sm_whitespace = SourceRefObject("whitespace", None) self.sm_badspace = SourceRefObject("bad", None) self.sm_newline = SourceRefObject("newline", None) self.sm_newline_suppressor = SourceRefObject("suppressor", None) self.sm_suspend_list = [] if fh == -1: self.sr = SourceRef_DEFAULT else: self.sr = SourceRef.from_FileHandle(self.__fh) CharacterSetVsAction_BuilderBase.__init__(self, ("whitespace", "suspend", "newline", "suppressor", "bad"), KeywordElseAdmissibleF=False) def finalize(self): # Finalize / Produce 'IndentationCount' object. # if self.sm_whitespace.get() is None: self.sm_whitespace.set(self.__sm_whitespace_default(), SourceRef_DEFAULT) if self.sm_newline.get() is None: self.sm_newline.set(self.__sm_newline_default(), SourceRef_DEFAULT) # -- consistency self._consistency_check() # Transform 'SourceRefObject' into 'Pattern_Prep' objects # (TODO: Why not use it in the first place?) 
def get_pattern(SRO): if SRO is None or SRO.get() is None: return None return Pattern_Prep(SRO.get(), PatternString="<indentation %s>" % SRO.name, Sr=SRO.sr) pattern_suspend_list = [ get_pattern(sro) for sro in self.sm_suspend_list ] pattern_suspend_list = [ x for x in pattern_suspend_list if x is not None ] if self.sm_newline_suppressor.set_f(): sm_suppressed_newline = sequentialize.do([self.sm_newline_suppressor.get(), self.sm_newline.get()]) sm_suppressed_newline = beautifier.do(sm_suppressed_newline) pattern_suppressed_newline = Pattern_Prep(sm_suppressed_newline, PatternString="<indentation suppressed newline>", Sr=self.sm_newline_suppressor.sr) else: pattern_suppressed_newline = None return IndentationCount_Pre(self.sr, get_pattern(self.sm_whitespace), get_pattern(self.sm_badspace), get_pattern(self.sm_newline), pattern_suppressed_newline, pattern_suspend_list) def requires_count(self): return False def specify(self, identifier, pattern, sr): sm = pattern.extract_sm() if identifier == "whitespace": self.__specify(self.sm_whitespace, sm, sr) elif identifier == "bad": self.__specify(self.sm_badspace, sm, sr) elif identifier == "newline": self.__specify(self.sm_newline, sm, sr) elif identifier == "suppressor": self.__specify(self.sm_newline_suppressor, sm , sr) elif identifier == "suspend": self.__specify_suspend(sm, sr) else: return False return True @typed(sr=SourceRef) def __specify(self, member_ref, Sm, sr): assert Sm is not None _error_if_defined_before(member_ref, sr) if not Sm.is_DFA_compliant(): Sm = beautifier.do(Sm) member_ref.set(Sm, sr) @typed(sr=SourceRef) def __specify_suspend(self, Sm, sr): for before in self.sm_suspend_list: if not identity.do(before.get(), Sm): continue error.log("'suspend' has been defined before;", sr, DontExitF=True) error.log("at this place.", before.sr) sm_suspend = SourceRefObject("suspend", None) self.__specify(sm_suspend, Sm, sr) self.sm_suspend_list.append(sm_suspend) def __sm_newline_default(self): """Default newline: '(\n)|(\r\n)' """ sm = DFA.from_character_set(NumberSet(ord('\n'))) if Setup.dos_carriage_return_newline_f: sm.add_transition_sequence(sm.init_state_index, [ord('\r'), ord('\n')]) return sm def __sm_whitespace_default(self): """Try to define default whitespace ' ' or '\t' if their positions are not yet occupied in the count_command_map. """ sm_whitespace = DFA.from_character_set(NumberSet.from_integer_list([ord(' '), ord('\t')])) sm_whitespace = beautifier.do(repeat.do(sm_whitespace, 1)) if self.sm_badspace.get() is not None: sm_whitespace = difference.do(sm_whitespace, self.sm_badspace.get()) if sm_whitespace.is_Empty() \ or outrun.do(self.sm_badspace.get(), sm_whitespace): error.log("Cannot define default 'whitespace' in the frame of the given\n" "definition of 'bad'.", self.sm_badspace.sr) return sm_whitespace def _consistency_check(self): """ Required defintions: -- WHITESPACE (Default done automatically) => Assert. -- NEWLINE (Default done automatically) => Assert. Inadmissible 'eat-into'. -- SUPPRESSOR shall not eat into [NEWLINE] -- NEWLINE shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR] -- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND]. -- BADSPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND]. No common lexemes: -- WHITESPACE and BADSPACE may not have common lexemes. 
Outrun: -- NEWLINE may not start with SUSPEND and vice versa -- NEWLINE may not start with SUPPRESSOR and vice versa -- SUPPRESSOR may not start with SUSPEND and vice versa -- WHITESPACE shall not outrun BADSPACE, but the contrary is ok. (BADSPACE may outrun WHITESPACE (e.g: lexeme with 'tab' after whitespace') """ # (1) Required definitions _____________________________________________ assert self.sm_whitespace.set_f() assert self.sm_newline.set_f() whitespace = self.sm_whitespace newline = self.sm_newline badspace = self.sm_badspace suppressor = self.sm_newline_suppressor suspend_list = self.sm_suspend_list # (2) Inadmissible 'eat-into' __________________________________________ # cmp_list = [ (newline, badspace), (newline, whitespace), (newline, suppressor), (suppressor, newline), (whitespace, newline), (whitespace, suppressor), (badspace, newline), (badspace, suppressor), ] \ + [ (whitespace, x) for x in suspend_list ] \ + [ (newline, x) for x in suspend_list ] \ + [ (badspace, x) for x in suspend_list ] def _error(FormatStr, Sro0, Sro1): error.log(FormatStr % (Sro0.name, Sro1.name), Sro0.sr, DontExitF=True) error.log("'%s' defined here." % Sro1.name, Sro1.sr) def _iterate(SroPairList): for first_sro, second_sro in cmp_list: first, second = first_sro.get(), second_sro.get() if first is None or second is None: continue yield first_sro, first, second_sro, second for first_sro, first, second_sro, second in _iterate(cmp_list): if swallow.ending_A_beginning_B(first, second): _error("'%s' may eat into beginning of '%s'.", first_sro, second_sro) elif swallow.inside_A_match_B(first, second): _error("'%s' may swallow something matched by '%s'.", first_sro, second_sro) for sm_suspend in self.sm_suspend_list: only_common_f, \ common_f = tail.do(self.sm_newline.get(), sm_suspend.get()) error_check.tail(only_common_f, common_f, "indentation handler's newline", self.sm_newline.sr, "suspend", sm_suspend.sr) # (3) Inadmissible common lexemes _____________________________________ # if badspace.get() and not intersection.do([badspace.get(), whitespace.get()]).is_Empty(): _error("'%s' and '%s' match on common lexemes.", whitespace, badspace) # (3) Inadmissible outruns ____________________________________________ # cmp_list = [ (newline, suppressor), (suppressor, newline), (whitespace, badspace) ] for x in suspend_list: cmp_list.extend([ (newline, x), (x, newline), (suppressor, x), (x, suppressor) ]) for first_sro, first, second_sro, second in _iterate(cmp_list): if outrun.do(second, first): _error("'%s' may outrun '%s'.", first_sro, second_sro) def _parse_definition_head(fh, IdentifierList): if check(fh, "\\default"): error.log("'\\default' has been replaced by keyword '\\else' since quex 0.64.9!", fh) elif check(fh, "\\else"): pattern = None else: pattern = regular_expression.parse(fh, AllowPreContextF=False, AllowPostContextF=False) skip_whitespace(fh) check_or_die(fh, "=>", " after character set definition.") skip_whitespace(fh) identifier = read_identifier(fh, OnMissingStr="Missing identifier following '=>'.") error.verify_word_in_list(identifier, IdentifierList, "Unrecognized specifier '%s'." % identifier, fh) skip_whitespace(fh) return pattern, identifier, SourceRef.from_FileHandle(fh) def _read_value_specifier(fh, Keyword, Default=None): skip_whitespace(fh) value = read_integer(fh) if value is not None: return value # not a number received, is it an identifier? 
variable = read_identifier(fh) if variable: return variable elif Default is not None: return Default error.log("Missing integer or variable name after keyword '%s'." % Keyword, fh) __CountActionMap_DEFAULT = None def LineColumnCount_Default(): global __CountActionMap_DEFAULT if __CountActionMap_DEFAULT is None: builder = CountActionMap_Builder() builder.add(NumberSet(ord('\n')), E_CharacterCountType.LINE, 1, SourceRef_DEFAULT) builder.add(NumberSet(ord('\t')), E_CharacterCountType.GRID, 4, SourceRef_DEFAULT) builder.define_else(E_CharacterCountType.COLUMN, 1, SourceRef_DEFAULT) # Define: "\else" __CountActionMap_DEFAULT = builder.finalize( Setup.buffer_encoding.source_set.minimum(), Setup.buffer_encoding.source_set.least_greater_bound(), # Apply: "\else" SourceRef_DEFAULT) return __CountActionMap_DEFAULT def _error_if_defined_before(Before, sr): if not Before.set_f(): return error.log("'%s' has been defined before;" % Before.name, sr, DontExitF=True) error.log("at this place.", Before.sr) def _extract_trigger_set(sr, Keyword, Pattern): if Pattern is None: return None elif isinstance(Pattern, NumberSet): return Pattern def check_can_be_matched_by_single_character(SM): bad_f = False init_state = SM.get_init_state() if SM.get_init_state().is_acceptance(): bad_f = True elif len(SM.states) != 2: bad_f = True # Init state MUST transit to second state. Second state MUST not have any transitions elif len(init_state.target_map.get_target_state_index_list()) != 1: bad_f = True else: tmp = set(SM.states.keys()) tmp.remove(SM.init_state_index) other_state_index = next(iter(tmp)) if len(SM.states[other_state_index].target_map.get_target_state_index_list()) != 0: bad_f = True if bad_f: error.log("For '%s' only patterns are addmissible which\n" % Keyword + \ "can be matched by a single character, e.g. \" \" or [a-z].", sr) sm = Pattern.extract_sm() check_can_be_matched_by_single_character(sm) transition_map = sm.get_init_state().target_map.get_map() assert len(transition_map) == 1 return list(transition_map.values())[0] def _check_grid_values_integer_multiples(CaMap): """If there are no spaces and the grid is on a homogeneous scale, => then the grid can be transformed into 'easy-to-compute' spaces. """ grid_value_list = [] min_info = None for character_set, info in CaMap: if info.cc_type == E_CharacterCountType.COLUMN: return elif info.cc_type != E_CharacterCountType.GRID: continue elif type(info.value) in (str, str): # If there is one single 'variable' grid value, # then no assumptions can be made. return grid_value_list.append(info.value) if min_info is None or info.value < min_info.value: min_info = info if min_info is None: return # Are all grid values a multiple of the minimum? if all(x % min_info.value == 0 for x in grid_value_list): error.warning("Setup does not contain spaces, only grids (tabulators). All grid\n" \ "widths are multiples of %i. The grid setup %s is equivalent to\n" \ % (min_info.value, repr(sorted(grid_value_list))[1:-1]) + \ "a setup with space counts %s. Space counts are faster to compute.\n" \ % repr([x / min_info.value for x in sorted(grid_value_list)])[1:-1], min_info.sr) return def check_defined(CaMap, SourceReference, CCT): """Checks whether the character counter type has been defined in the map. THROWS: Error in case that is has not been defined. """ for character_set, info in CaMap: if info.cc_type == CCT: return error.warning("Setup does not define '%s'." % cc_type_name_db[CCT], SourceReference, SuppressCode=NotificationDB.warning_counter_setup_without_newline)
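The arithmetic behind _check_grid_values_integer_multiples is simple enough to check standalone; a sketch with made-up grid widths and no quex imports:

grid_value_list = [4, 8, 16]
minimum = min(grid_value_list)

# If every grid width is an integer multiple of the smallest one,
# the grids behave like plain space counts.
if all(x % minimum == 0 for x in grid_value_list):
    print([x // minimum for x in sorted(grid_value_list)])  # [1, 2, 4]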
get_enrollment_dates
Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()
import requests import urllib.parse import posixpath import pandas as pd # MASKED: get_enrollment_dates function (lines 6-34) def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. 
Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
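The paginated GET helpers above all share one loop shape: follow requests' parsed Link headers until the 'current' page equals the 'last' page. A condensed sketch of that pattern (hypothetical URL and token; note that GET query arguments such as per_page are conventionally sent via params=, whereas the code above passes them through json=):

import requests

def fetch_all_pages(api_url, token):
    resp = None
    items = []
    # requests exposes the parsed Link header as resp.links
    while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
        resp = requests.get(
            url=api_url if resp is None else resp.links['next']['url'],
            headers={"Authorization": f"Bearer {token}",
                     "Accept": "application/json+canvas-string-ids"},
            params={"per_page": "100"},
        )
        items.extend(resp.json())
    return items

Like the loops above, this assumes the server always supplies 'current', 'next', and 'last' links.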
def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates
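The timestamp normalisation chained onto created_at can be verified in isolation (made-up timestamp):

raw = "2023-09-05T18:30:00Z"
print(raw.strip('Z').replace('T', '-').replace(':', '-')[:16])  # 2023-09-05-18-30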
6
34
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. 
Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
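Because get_enrollment_dates and the due-date helpers both normalise to the fixed 'YYYY-MM-DD-HH-MM' form, the strings compare chronologically; a sketch of the late-registration check the docstring alludes to (made-up values):

enrolled = "2023-09-10-09-00"
due = "2023-09-08-23-59"
if enrolled > due:
    print("student enrolled after the deadline; extend it")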
get_assignments
Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates # MASKED: get_assignments function (lines 36-57) def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. 
Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
def get_assignments(course):
    '''Takes a course object and returns a Pandas data frame with all existing
    assignments and their attributes/data.
    Example: course.get_assignments()'''
    url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments")
    api_url = urllib.parse.urljoin(course['hostname'], url_path)
    token = course['token']
    resp = requests.get(
        url=api_url,
        headers={
            "Authorization": f"Bearer {token}",
            "Accept": "application/json+canvas-string-ids"
        },
        json={"per_page": "10000"},
    )
    assignments = resp.json()
    assign_data = pd.DataFrame.from_dict(assignments)
    return assign_data
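One caveat: Canvas generally caps per_page at 100 regardless of the value requested, so the single request above can silently truncate the result in a course with many assignments. A hedged sketch of a paginated variant, reusing the Link-header loop that get_enrollment_dates and get_grades already use; get_assignments_paginated is a name introduced here for illustration, and it sends per_page via params= since query parameters are the conventional way to pass GET options:

def get_assignments_paginated(course):
    '''Like get_assignments, but follows Canvas's Link headers so courses
    with more than one page of assignments come back complete.'''
    url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments")
    api_url = urllib.parse.urljoin(course['hostname'], url_path)
    token = course['token']
    resp = None
    assignments = []
    # Same pagination pattern as the other helpers in this module: keep
    # following the 'next' link until the current page equals the last page.
    while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
        resp = requests.get(
            url=api_url if resp is None else resp.links['next']['url'],
            headers={
                "Authorization": f"Bearer {token}",
                "Accept": "application/json+canvas-string-ids"
            },
            params={"per_page": "100"},
        )
        assignments.extend(resp.json())
    return pd.DataFrame.from_dict(assignments)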
36
57
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. 
Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
get_assignment_lock_date
Takes a course object and the name of a Canvas assignment and returns the lock date. Returns None if no lock date is assigned. Example: course.get_assignment_lock_date('worksheet_01')
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data # MASKED: get_assignment_lock_date function (lines 59-71) def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. 
Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
def get_assignment_lock_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns
    the lock date. Returns None if no lock date is assigned.
    Example: course.get_assignment_lock_date('worksheet_01')'''
    assignments = get_assignments(course)
    assignments = assignments[['name', 'lock_at']].query('name == @assignment')
    lock_date = assignments['lock_at'].to_numpy()[0]
    if lock_date is None:
        return lock_date
    lock_date = lock_date.replace("T", "-")
    lock_date = lock_date.replace(":", "-")
    return lock_date[:16]
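To make the returned format concrete, here is the same replace-and-truncate chain applied to a typical Canvas ISO-8601 timestamp (the input value is illustrative):

# Canvas returns ISO-8601 timestamps such as "2024-01-15T08:00:00Z".
lock_at = "2024-01-15T08:00:00Z"
formatted = lock_at.replace("T", "-").replace(":", "-")[:16]
print(formatted)  # -> 2024-01-15-08-00  (date and time at minute precision)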
59
71
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. 
Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
get_assignment_due_date
Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date is assigned. Example: course.get_assignment_due_date('worksheet_01')
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] # MASKED: get_assignment_due_date function (lines 75-87) def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. 
Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
def get_assignment_due_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns
    the due date. Returns None if no due date is assigned.
    Example: course.get_assignment_due_date('worksheet_01')'''
    assignments = get_assignments(course)
    assignments = assignments[['name', 'due_at']].query('name == @assignment')
    due_date = assignments['due_at'].to_numpy()[0]
    if due_date is None:
        return due_date
    due_date = due_date.replace("T", "-")
    due_date = due_date.replace(":", "-")
    return due_date[:16]
75
87
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. 
Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
get_assignment_unlock_date
Takes a course object and the name of a Canvas assignment and returns the unlock date. Returns None if no unlock date is assigned. Example: course.get_assignment_unlock_date('worksheet_01')
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] # MASKED: get_assignment_unlock_date function (lines 89-100) def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. 
Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
def get_assignment_unlock_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns
    the unlock date. Returns None if no unlock date is assigned.
    Example: course.get_assignment_unlock_date('worksheet_01')'''
    assignments = get_assignments(course)
    assignments = assignments[['name', 'unlock_at']].query('name == @assignment')
    unlock_date = assignments['unlock_at'].to_numpy()[0]
    if unlock_date is None:
        return unlock_date
    unlock_date = unlock_date.replace("T", "-").replace(':', '-')
    return unlock_date[:16]
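Since the three date getters differ only in the column they read, one way to cut the duplication is a single parameterized helper; a sketch under that idea (the name _get_assignment_date is introduced here for illustration and is not part of the module):

def _get_assignment_date(course, assignment, column):
    '''Shared lookup for the 'lock_at', 'due_at', and 'unlock_at' columns.'''
    assignments = get_assignments(course)
    assignments = assignments[['name', column]].query('name == @assignment')
    date = assignments[column].to_numpy()[0]
    if date is None:
        return date
    return date.replace("T", "-").replace(":", "-")[:16]

# e.g. get_assignment_due_date(course, a) becomes _get_assignment_date(course, a, 'due_at')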
89
100
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. 
Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
get_assignment_id
Takes a course object and the name of a Canvas assignment and returns the Canvas ID. Example: course.get_assignment_id('worksheet_01')
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] # MASKED: get_assignment_id function (lines 103-110) def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. 
Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
def get_assignment_id(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns
    the Canvas ID.
    Example: course.get_assignment_id('worksheet_01')'''
    assignments = get_assignments(course)
    assignments = assignments[['name', 'id']].query('name == @assignment')
    return assignments['id'].values[0]
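A usage sketch; note that .values[0] raises an IndexError when no assignment matches the name exactly, so callers may want to guard for that (the try/except here is an illustrative addition, not module behavior):

try:
    assignment_id = get_assignment_id(course, 'worksheet_01')
except IndexError:
    # No assignment with that exact name exists in the course.
    assignment_id = None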
103
110
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. 
Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
post_grade
Takes a course object, an assignment name, a student ID, and a score to upload, then posts the score to Canvas. Example: post_grade(dsci100, 'worksheet_01', '23423', 10)
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. 
Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) # MASKED: post_grade function (lines 168-186)
def post_grade(course, assignment, student, score):
    '''Takes a course object, an assignment name, a student ID, and a score
    to upload, then posts the score to Canvas.
    Example: post_grade(dsci100, 'worksheet_01', '23423', 10)'''
    assignment_id = get_assignment_id(course, assignment)
    url_post_path = posixpath.join("api", "v1", "courses", course['course_id'],
                                   "assignments", assignment_id, "submissions", student)
    api_url = urllib.parse.urljoin(course['hostname'], url_post_path)
    token = course['token']
    # api_url already ends in the student id, so joining it again resolves to
    # the same URL; kept for parity with the original call.
    resp = requests.put(
        url=urllib.parse.urljoin(api_url, student),
        headers={
            "Authorization": f"Bearer {token}",
            "Accept": "application/json+canvas-string-ids"
        },
        json={"submission": {"posted_grade": score}},
    )
168
186
import requests import urllib.parse import posixpath import pandas as pd def get_enrollment_dates(course): '''Takes a course object and returns student dates of enrollment. Useful for handling late registrations and modified deadlines. Example: course.get_enrollment_date()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None students = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "type": ["StudentEnrollment"], "per_page":"100" } ) students.extend(resp.json()) enrollment_dates = {} for st in students: enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16] return enrollment_dates def get_assignments(course): '''Takes a course object and returns a Pandas data frame with all existing assignments and their attributes/data Example: course.get_assignments()''' url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = requests.get( url=api_url, headers={ "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page": "10000" }, ) assignments = resp.json() assign_data = pd.DataFrame.from_dict(assignments) return assign_data def get_assignment_lock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'lock_at']].query('name == @assignment') lock_date = assignments['lock_at'].to_numpy()[0] if lock_date is None: return lock_date lock_date = lock_date.replace("T", "-") lock_date = lock_date.replace(":", "-") return lock_date[:16] def get_assignment_due_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_due_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'due_at']].query('name == @assignment') due_date = assignments['due_at'].to_numpy()[0] if due_date is None: return due_date due_date = due_date.replace("T", "-") due_date = due_date.replace(":", "-") return due_date[:16] def get_assignment_unlock_date(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned. Example: course.get_assignment_unlock_date('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'unlock_at']].query('name == @assignment') unlock_date = assignments['unlock_at'].to_numpy()[0] if unlock_date is None: return unlock_date unlock_date = unlock_date.replace("T", "-").replace(':', '-') return unlock_date[:16] def get_assignment_id(course, assignment): '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID. 
Example: course.get_assignment_id('worksheet_01')''' assignments = get_assignments(course) assignments = assignments[['name', 'id']].query('name == @assignment') return assignments['id'].values[0] def get_grades(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] resp = None scores = {} while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) scores.update( {res['user_id'] : res['score'] for res in resp.json()} ) return scores def grades_need_posting(course, assignment): '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas. Example: course.get_grades(course, 'worksheet_01')''' assignment_id = get_assignment_id(course, assignment) url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions") api_url = urllib.parse.urljoin(course['hostname'], url_path) token = course['token'] #get enrollments to avoid the test student's submissions real_stu_ids = list(get_enrollment_dates(course).keys()) resp = None posted_flags = [] while resp is None or resp.links['current']['url'] != resp.links['last']['url']: resp = requests.get( url = api_url if resp is None else resp.links['next']['url'], headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "per_page":"100" } ) posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids]) return not all(posted_flags) def post_grade(course, assignment, student, score): '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas. Example: course.post_grades(dsci100, 'worksheet_01', '23423', 10)''' assignment_id = get_assignment_id(course, assignment) url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student) api_url = urllib.parse.urljoin(course['hostname'], url_post_path) token = course['token'] resp = requests.put( url = urllib.parse.urljoin(api_url, student), headers = { "Authorization": f"Bearer {token}", "Accept": "application/json+canvas-string-ids" }, json={ "submission": {"posted_grade": score} }, )
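A hypothetical end-to-end use of the graders above. The course dict shape is inferred from the keys the helpers access ('course_id', 'hostname', 'token'); every value below is a placeholder, not a real course or credential:

course = {
    "course_id": "12345",
    "hostname": "https://canvas.example.edu",
    "token": "CANVAS_API_TOKEN",  # placeholder; never hard-code real tokens
}

due = get_assignment_due_date(course, "worksheet_01")  # e.g. '2024-03-01-23-59' or None
if grades_need_posting(course, "worksheet_01"):
    # scores keyed by student id; the values here are made up
    for student_id, score in {"23423": 10, "23424": 8}.items():
        post_grade(course, "worksheet_01", student_id, score)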
make_kinetic_precond
Preconditioner P = 1 / (||k + G||² + ε)

Keyword Arguments:
kpointset -- SIRIUS k-point set supplying the G+k vectors
c0 -- reference coefficients used for the constraint projection
eps -- regularization constant (default 0.1)
asPwCoeffs -- build a PwCoeffs-backed preconditioner even for a single spin and k-point
from ..coefficient_array import PwCoeffs from scipy.sparse import dia_matrix import numpy as np # MASKED: make_kinetic_precond function (lines 6-43) class Preconditioner: def __init__(self): pass class DiagonalPreconditioner(Preconditioner): """ Apply diagonal preconditioner and project resulting gradient to satisfy the constraint. """ def __init__(self, D, c0): super().__init__() self.c0 = c0 self.D = D def __matmul__(self, other): """ """ from ..coefficient_array import CoefficientArray from .ot_transformations import lagrangeMult out = type(other)(dtype=other.dtype) if isinstance(other, CoefficientArray): for key, Dl in self.D.items(): out[key] = Dl * other[key] else: raise ValueError('wrong type given') ll = lagrangeMult(other, self.c0, self) return out + ll def __mul__(self, s): """ """ from ..coefficient_array import CoefficientArray import numpy as np if np.isscalar(s): for key, Dl in self.D.items(): self.D[key] = s*Dl elif isinstance(s, CoefficientArray): out = type(s)(dtype=s.dtype) for key in s.keys(): out[key] = self.D[key] * s[key] return out __lmul__ = __mul__ __rmul__ = __mul__ def __neg__(self): """ """ from ..coefficient_array import CoefficientArray if isinstance(self.D, CoefficientArray): out_data = type(self.D)(dtype=self.D.dtype, ctype=self.D.ctype) out = DiagonalPreconditioner(out_data, self.c0) for k, v in self.D.items(): out.D[k] = -v return out else: out = DiagonalPreconditioner(self.D, self.c0) out.D = -self.D return out def __getitem__(self, key): return self.D[key] class IdentityPreconditioner(Preconditioner): def __init__(self, c0, _f=1): super().__init__() self.c0 = c0 self._f = _f def __matmul__(self, other): from .ot_transformations import lagrangeMult ll = lagrangeMult(other, self.c0, self) return self._f * other + ll def __mul__(self, s): return self._f * s def __neg__(self): return IdentityPreconditioner(self.c0, _f=-self._f) def __getitem__(self, key): return self._f __lmul__ = __mul__ __rmul__ = __mul__
def make_kinetic_precond(kpointset, c0, eps=0.1, asPwCoeffs=True):
    """
    Preconditioner P = 1 / (||k + G||² + ε)

    Keyword Arguments:
    kpointset -- SIRIUS k-point set supplying the G+k vectors
    c0 -- reference coefficients used for the constraint projection
    eps -- regularization constant (default 0.1)
    asPwCoeffs -- build a PwCoeffs-backed preconditioner even for a
                  single spin and k-point
    """
    nk = len(kpointset)
    nc = kpointset.ctx().num_spins()
    if nc == 1 and nk == 1 and not asPwCoeffs:
        # single spin, single k-point: return a plain sparse diagonal matrix
        kp = kpointset[0]
        gkvec = kp.gkvec()
        assert gkvec.num_gvec() == gkvec.count()
        N = gkvec.count()
        d = np.array([
            1 / (np.sum(np.array(gkvec.gkvec(i))**2) + eps)
            for i in range(N)
        ])
        return DiagonalPreconditioner(
            D=dia_matrix((d, 0), shape=(N, N)), c0=c0)
    else:
        # one diagonal block per (k-point, spin) pair, stored as PwCoeffs
        P = PwCoeffs(dtype=np.float64, ctype=dia_matrix)
        for k in range(nk):
            kp = kpointset[k]
            gkvec = kp.gkvec()
            assert gkvec.num_gvec() == gkvec.count()
            N = gkvec.count()
            d = np.array([
                1 / (np.sum(np.array(gkvec.gkvec_cart(i))**2) + eps)
                for i in range(N)
            ])
            for ispn in range(nc):
                P[k, ispn] = dia_matrix((d, 0), shape=(N, N))
        return DiagonalPreconditioner(P, c0)
6
43
from ..coefficient_array import PwCoeffs from scipy.sparse import dia_matrix import numpy as np def make_kinetic_precond(kpointset, c0, eps=0.1, asPwCoeffs=True): """ Preconditioner P = 1 / (||k|| + ε) Keyword Arguments: kpointset -- """ nk = len(kpointset) nc = kpointset.ctx().num_spins() if nc == 1 and nk == 1 and not asPwCoeffs: # return as np.matrix kp = kpointset[0] gkvec = kp.gkvec() assert (gkvec.num_gvec() == gkvec.count()) N = gkvec.count() d = np.array([ 1 / (np.sum((np.array(gkvec.gkvec(i)))**2) + eps) for i in range(N) ]) return DiagonalPreconditioner( D=dia_matrix((d, 0), shape=(N, N)), c0=c0) else: P = PwCoeffs(dtype=np.float64, ctype=dia_matrix) for k in range(nk): kp = kpointset[k] gkvec = kp.gkvec() assert (gkvec.num_gvec() == gkvec.count()) N = gkvec.count() d = np.array([ 1 / (np.sum( (np.array(gkvec.gkvec_cart(i)))**2) + eps) for i in range(N) ]) for ispn in range(nc): P[k, ispn] = dia_matrix((d, 0), shape=(N, N)) return DiagonalPreconditioner(P, c0) class Preconditioner: def __init__(self): pass class DiagonalPreconditioner(Preconditioner): """ Apply diagonal preconditioner and project resulting gradient to satisfy the constraint. """ def __init__(self, D, c0): super().__init__() self.c0 = c0 self.D = D def __matmul__(self, other): """ """ from ..coefficient_array import CoefficientArray from .ot_transformations import lagrangeMult out = type(other)(dtype=other.dtype) if isinstance(other, CoefficientArray): for key, Dl in self.D.items(): out[key] = Dl * other[key] else: raise ValueError('wrong type given') ll = lagrangeMult(other, self.c0, self) return out + ll def __mul__(self, s): """ """ from ..coefficient_array import CoefficientArray import numpy as np if np.isscalar(s): for key, Dl in self.D.items(): self.D[key] = s*Dl elif isinstance(s, CoefficientArray): out = type(s)(dtype=s.dtype) for key in s.keys(): out[key] = self.D[key] * s[key] return out __lmul__ = __mul__ __rmul__ = __mul__ def __neg__(self): """ """ from ..coefficient_array import CoefficientArray if isinstance(self.D, CoefficientArray): out_data = type(self.D)(dtype=self.D.dtype, ctype=self.D.ctype) out = DiagonalPreconditioner(out_data, self.c0) for k, v in self.D.items(): out.D[k] = -v return out else: out = DiagonalPreconditioner(self.D, self.c0) out.D = -self.D return out def __getitem__(self, key): return self.D[key] class IdentityPreconditioner(Preconditioner): def __init__(self, c0, _f=1): super().__init__() self.c0 = c0 self._f = _f def __matmul__(self, other): from .ot_transformations import lagrangeMult ll = lagrangeMult(other, self.c0, self) return self._f * other + ll def __mul__(self, s): return self._f * s def __neg__(self): return IdentityPreconditioner(self.c0, _f=-self._f) def __getitem__(self, key): return self._f __lmul__ = __mul__ __rmul__ = __mul__
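The essential operation behind make_kinetic_precond is a diagonal scaling D_ii = 1 / (||k + G_i||² + ε) applied to the gradient. Since the SIRIUS kpointset object is not available here, the following self-contained sketch uses random vectors in place of the real G+k vectors; shapes and values are assumptions, not SIRIUS API calls:

import numpy as np
from scipy.sparse import dia_matrix

rng = np.random.default_rng(0)
gkvec = rng.normal(size=(50, 3))   # stand-in for 50 G+k vectors of one k-point
eps = 0.1

d = 1.0 / (np.sum(gkvec**2, axis=1) + eps)      # 1 / (|k+G|^2 + eps) per plane wave
P = dia_matrix((d, 0), shape=(len(d), len(d)))  # same construction as the code above

grad = rng.normal(size=len(d))                  # a mock energy gradient
precond_grad = P @ grad                         # damps high-kinetic-energy components

The DiagonalPreconditioner wrapper then adds the Lagrange-multiplier term from lagrangeMult on top of this scaling, so the preconditioned gradient satisfies the constraint, as its class docstring states.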
rbbox2d_to_near_bbox
convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
    rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
    bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) # MASKED: rbbox2d_to_near_bbox function (lines 131-143) def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
def rbbox2d_to_near_bbox(rbboxes):
    """convert rotated bbox to nearest 'standing' or 'lying' bbox.
    Args:
        rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
    Returns:
        bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
    """
    rots = rbboxes[..., -1]
    rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))
    cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]
    bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
    bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])
    return bboxes
131
143
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
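For reference, a quick numeric check of limit_period as defined in the file content above (a minimal self-contained sketch, assuming only numpy; the function body is copied verbatim from the code above):

import numpy as np

def limit_period(val, offset=0.5, period=np.pi):
    # same one-liner as in the file content above
    return val - np.floor(val / period + offset) * period

# With the defaults, angles are wrapped into [-pi/2, pi/2):
print(limit_period(np.array([3 * np.pi / 4, -3 * np.pi / 4, np.pi])))
# -> approximately [-pi/4, pi/4, 0.0]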
rotation_2d
Rotate 2D points around the origin point, clockwise when the angle is positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angles. Returns: float array: same shape as points
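A minimal illustration of the convention above, assuming numpy is imported as np; the matrix layout mirrors the rotation_2d implementation given further down in this row (points are row vectors, right-multiplied by the transposed rotation matrix):

import numpy as np

angle = np.pi / 2                              # +90 degrees
rot_mat_T = np.array([[np.cos(angle), -np.sin(angle)],
                      [np.sin(angle),  np.cos(angle)]])
point = np.array([1.0, 0.0])
# (1, 0) lands at (0, -1): clockwise in an x-right / y-up frame.
print(np.round(point @ rot_mat_T, 6))          # -> [ 0. -1.]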
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T # MASKED: rotation_2d function (lines 207-220) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
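To show where the masked rotation_2d slots in, here is a minimal sketch of the calling pipeline (center_to_corner_box2d in the code above); it assumes the functions defined in the file above (corners_nd, rotation_2d, center_to_corner_box2d) are in scope, e.g. imported from the det3d box-ops module (the exact import path is an assumption):

import numpy as np

centers = np.array([[0.0, 0.0]])   # one box centered at the origin
dims = np.array([[2.0, 4.0]])      # x-size 2, y-size 4
angles = np.array([0.0])           # no rotation, so rotation_2d is a no-op here

corners = center_to_corner_box2d(centers, dims, angles)
print(corners.shape)               # -> (1, 4, 2)
print(corners[0])
# -> [[-1. -2.] [-1.  2.] [ 1.  2.] [ 1. -2.]]  (clockwise, starting at the minimum corner)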
def rotation_2d(points, angles):
    """Rotate 2D points around the origin point, clockwise when the angle is positive.

    Args:
        points (float array, shape=[N, point_size, 2]): points to be rotated.
        angles (float array, shape=[N]): rotation angles.

    Returns:
        float array: same shape as points
    """
    rot_sin = np.sin(angles)
    rot_cos = np.cos(angles)
    # rot_mat_T has shape [2, 2, N]: one transposed rotation matrix per sample.
    rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])
    # Batched right-multiplication: out[a] = points[a] @ rot_mat_T[..., a]
    return np.einsum("aij,jka->aik", points, rot_mat_T)
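The einsum above is just a batched right-multiplication of each sample's points by that sample's transposed rotation matrix. A minimal equivalence check (assuming numpy as np and rotation_2d as defined above):

import numpy as np

points = np.random.rand(3, 4, 2)   # 3 boxes, 4 corners each
angles = np.random.rand(3)

expected = np.stack([
    points[i] @ np.array([[np.cos(a), -np.sin(a)],
                          [np.sin(a),  np.cos(a)]])
    for i, a in enumerate(angles)
])
assert np.allclose(rotation_2d(points, angles), expected)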
207
220
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
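For reference, encode_parts above maps each 2D relative shift to one of four quadrant labels (0: +x/+y, 1: -x/+y, 2: -x/-y, 3: +x/-y). A minimal usage sketch, assuming only numpy and the function as defined above; the input values are purely illustrative:

import numpy as np

# One illustrative shift per quadrant; encode_parts returns [0, 1, 2, 3].
shifts = np.array(
    [[0.3, 0.1], [-0.2, 0.4], [-0.1, -0.5], [0.6, -0.2]], dtype=np.float32
)
labels = encode_parts(shifts)  # array([0, 1, 2, 3], dtype=int32)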
rotation_box
Rotate 2D box corners about the origin, clockwise when the angle is positive. Args: box_corners (float array, shape=[N, point_size, 2]): corners to be rotated. angle (float): rotation angle in radians. Returns: float array: same shape as box_corners
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) # MASKED: rotation_box function (lines 223-238) def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
def rotation_box(box_corners, angle):
    """Rotate 2D box corners about the origin, clockwise when the angle is positive.

    Args:
        box_corners (float array, shape=[N, point_size, 2]): corners to be rotated.
        angle (float): rotation angle in radians.

    Returns:
        float array: same shape as box_corners
    """
    rot_sin = np.sin(angle)
    rot_cos = np.cos(angle)
    # Transposed rotation matrix, so corners can be right-multiplied.
    rot_mat_T = np.array(
        [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype
    )
    return box_corners @ rot_mat_T
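A minimal usage sketch for rotation_box, assuming numpy and the definition above; the corner values are illustrative. With this convention a positive angle maps (1, 0) to (0, -1), i.e. clockwise in a right-handed x/y frame:

import numpy as np

# Unit square centered at the origin, rotated by 90 degrees.
corners = np.array(
    [[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]], dtype=np.float32
)
rotated = rotation_box(corners, np.pi / 2)
# e.g. the corner (0.5, 0.5) maps to approximately (0.5, -0.5)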
223
238
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
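As a quick sanity check of iou_jit defined above, a minimal sketch on two axis-aligned boxes in (xmin, ymin, xmax, ymax) form; the numbers are illustrative. The default eps=1.0 matches an inclusive pixel-index convention, while eps=0.0 is the continuous-coordinate setting used by riou_cc:

import numpy as np

boxes = np.array([[0.0, 0.0, 2.0, 2.0]], dtype=np.float32)
query = np.array([[1.0, 1.0, 3.0, 3.0]], dtype=np.float32)
# Intersection is 1 x 1 = 1; union is 4 + 4 - 1 = 7, so IoU = 1/7.
ious = iou_jit(boxes, query, eps=0.0)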
center_to_corner_box3d
convert kitti locations, dimensions and angles to corners. Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relative to the smallest corner; use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar coordinates. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: float array, shape=[N, 8, 3]: box corners.
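The implementation itself is masked in the snippet below; as a usage sketch of the signature this docstring describes, with illustrative lidar-frame values:

import numpy as np

centers = np.array([[10.0, 2.0, -1.0]], dtype=np.float32)  # box centers (x, y, z)
dims = np.array([[1.8, 4.5, 1.6]], dtype=np.float32)       # w, l, h
angles = np.array([0.3], dtype=np.float32)                 # yaw around z
corners = center_to_corner_box3d(
    centers, dims, angles, origin=(0.5, 0.5, 0), axis=2    # lidar convention
)
# corners.shape == (1, 8, 3)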
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T # MASKED: center_to_corner_box3d function (lines 241-262) def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): 
if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ 
velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
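The encode_parts helper above maps each relative (x, y) shift onto one of four sign quadrants. A minimal worked sketch, with made-up shift values:

import numpy as np

shifts = np.array([[1.0, 2.0], [-1.0, 2.0], [-1.0, -2.0], [1.0, -2.0]])
# quadrant ids by sign of (dx, dy): (+,+)->0, (-,+)->1, (-,-)->2, (+,-)->3
# encode_parts(shifts) -> array([0, 1, 2, 3], dtype=int32)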
def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2):
    """convert kitti locations, dimensions and angles to corners.

    Args:
        centers (float array, shape=[N, 3]): locations in kitti label file.
        dims (float array, shape=[N, 3]): dimensions in kitti label file.
        angles (float array, shape=[N]): rotation_y in kitti label file.
        origin (list or array or float): origin point relative to the smallest
            (minimum) corner. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0]
            in lidar.
        axis (int): rotation axis. 1 for camera and 2 for lidar.

    Returns:
        float array, shape=[N, 8, 3]: box corners.
    """
    # 'length' in kitti format is in x axis.
    # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
    # center in kitti format is [0.5, 1.0, 0.5] in xyz.
    corners = corners_nd(dims, origin=origin)
    # corners: [N, 8, 3]
    if angles is not None:
        corners = rotation_3d_in_axis(corners, angles, axis=axis)
    corners += centers.reshape([-1, 1, 3])
    return corners
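A short usage sketch for the implementation above (it assumes the function is in scope together with corners_nd and rotation_3d_in_axis from the same file; the box values are illustrative only):

import numpy as np

centers = np.array([[0.0, 0.0, 0.0]])   # one box at the lidar origin
dims = np.array([[1.6, 3.9, 1.56]])     # w, l, h extents
angles = np.array([np.pi / 2])          # yaw around the z axis

corners = center_to_corner_box3d(centers, dims, angles, origin=(0.5, 0.5, 0), axis=2)
print(corners.shape)  # (1, 8, 3): eight 3d corners per box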
241
262
from pathlib import Path

import numba
import numpy as np

from det3d.core.bbox.geometry import (
    points_count_convex_polygon_3d_jit,
    points_in_convex_polygon_3d_jit,
)

try:
    from spconv.utils import rbbox_intersection, rbbox_iou
except ImportError:
    print("Failed to import spconv; sparse convolution is not supported!")


def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
    rbbox_corners = center_to_corner_box3d(
        rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
    )
    surfaces = corner_to_surfaces_3d(rbbox_corners)
    return points_count_convex_polygon_3d_jit(points[:, :3], surfaces)


def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0):
    # less than 50 ms when used in SECOND with one thread; 10x slower than the gpu version
    boxes_corners = center_to_corner_box2d(
        rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
    )
    boxes_standup = corner_to_standup_nd(boxes_corners)
    qboxes_corners = center_to_corner_box2d(
        qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
    )
    qboxes_standup = corner_to_standup_nd(qboxes_corners)
    # if the standup boxes do not overlap, the rbboxes cannot overlap either.
    standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
    return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh)


def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0):
    # less than 50 ms when used in SECOND with one thread; 10x slower than the gpu version
    boxes_corners = center_to_corner_box2d(
        rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
    )
    boxes_standup = corner_to_standup_nd(boxes_corners)
    qboxes_corners = center_to_corner_box2d(
        qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
    )
    qboxes_standup = corner_to_standup_nd(qboxes_corners)
    # if the standup boxes do not overlap, the rbboxes cannot overlap either.
    standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
    return rbbox_intersection(
        boxes_corners, qboxes_corners, standup_iou, standup_thresh
    )


def corners_nd(dims, origin=0.5):
    """generate relative box corners based on length per dim and origin point.

    Args:
        dims (float array, shape=[N, ndim]): array of length per dim
        origin (list or array or float): origin point relative to the smallest point.

    Returns:
        float array, shape=[N, 2 ** ndim, ndim]: returned corners.
        point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
            (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
            where x0 < x1, y0 < y1, z0 < z1
    """
    ndim = int(dims.shape[1])
    corners_norm = np.stack(
        np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1
    ).astype(dims.dtype)
    # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
    # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
    # so we need to convert to a format that is convenient for further computation.
    # for 2d boxes, the format is clockwise, starting from the minimum point
    # for 3d boxes, please draw the lines by hand.
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
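One function in the file above that is easy to get wrong is limit_period, which wraps a value into [-offset * period, (1 - offset) * period). A small worked example under the defaults offset=0.5, period=pi (self-contained copy, for illustration only):

import numpy as np

def limit_period(val, offset=0.5, period=np.pi):
    return val - np.floor(val / period + offset) * period

print(limit_period(np.array([0.0, np.pi / 2, np.pi, -3 * np.pi / 4])))
# -> approximately [ 0. -1.5708  0.  0.7854]; every angle lands in [-pi/2, pi/2)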
center_to_corner_box2d
convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)

Args:
    centers (float array, shape=[N, 2]): locations in kitti label file.
    dims (float array, shape=[N, 2]): dimensions in kitti label file.
    angles (float array, shape=[N]): rotation_y in kitti label file.

Returns:
    float array, shape=[N, 4, 2]: box corners.
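An illustrative call matching the docstring above (it assumes center_to_corner_box2d is in scope; the numbers are made up):

import numpy as np

centers = np.array([[10.0, 5.0]])
dims = np.array([[1.6, 3.9]])    # x and y extents of the BEV box
angles = np.array([0.3])         # clockwise-positive rotation

corners = center_to_corner_box2d(centers, dims, angles)
print(corners.shape)  # (1, 4, 2): four 2d corners per box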
from pathlib import Path

import numba
import numpy as np

from det3d.core.bbox.geometry import (
    points_count_convex_polygon_3d_jit,
    points_in_convex_polygon_3d_jit,
)

try:
    from spconv.utils import rbbox_intersection, rbbox_iou
except ImportError:
    print("Failed to import spconv; sparse convolution is not supported!")


def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
    rbbox_corners = center_to_corner_box3d(
        rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
    )
    surfaces = corner_to_surfaces_3d(rbbox_corners)
    return points_count_convex_polygon_3d_jit(points[:, :3], surfaces)


def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0):
    # less than 50 ms when used in SECOND with one thread; 10x slower than the gpu version
    boxes_corners = center_to_corner_box2d(
        rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
    )
    boxes_standup = corner_to_standup_nd(boxes_corners)
    qboxes_corners = center_to_corner_box2d(
        qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
    )
    qboxes_standup = corner_to_standup_nd(qboxes_corners)
    # if the standup boxes do not overlap, the rbboxes cannot overlap either.
    standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
    return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh)


def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0):
    # less than 50 ms when used in SECOND with one thread; 10x slower than the gpu version
    boxes_corners = center_to_corner_box2d(
        rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
    )
    boxes_standup = corner_to_standup_nd(boxes_corners)
    qboxes_corners = center_to_corner_box2d(
        qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
    )
    qboxes_standup = corner_to_standup_nd(qboxes_corners)
    # if the standup boxes do not overlap, the rbboxes cannot overlap either.
    standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
    return rbbox_intersection(
        boxes_corners, qboxes_corners, standup_iou, standup_thresh
    )


def corners_nd(dims, origin=0.5):
    """generate relative box corners based on length per dim and origin point.

    Args:
        dims (float array, shape=[N, ndim]): array of length per dim
        origin (list or array or float): origin point relative to the smallest point.

    Returns:
        float array, shape=[N, 2 ** ndim, ndim]: returned corners.
        point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
            (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
            where x0 < x1, y0 < y1, z0 < z1
    """
    ndim = int(dims.shape[1])
    corners_norm = np.stack(
        np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1
    ).astype(dims.dtype)
    # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
    # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
    # so we need to convert to a format that is convenient for further computation.
    # for 2d boxes, the format is clockwise, starting from the minimum point
    # for 3d boxes, please draw the lines by hand.
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners # MASKED: center_to_corner_box2d function (lines 265-285) @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def 
center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): 
points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners
265
285
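For reference, a minimal NumPy sketch of what the implementation above produces for a single BEV box. It re-derives the corner math inline rather than importing the module (the import path for the file shown in file_content is not given in this record), and the center, dims, and angle values are arbitrary.

import numpy as np

# Sketch of center_to_corner_box2d for one box, assuming the conventions in
# the implementation above: dims-scaled unit-square corners, rotated by the
# yaw angle (clockwise when positive), then shifted to the box center.
center = np.array([10.0, 5.0])
dims = np.array([4.0, 2.0])          # length along x, width along y
angle = np.pi / 4

corners_norm = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]]) - 0.5
corners = corners_norm * dims        # scale the unit square to the box size
rot_mat_T = np.array([[np.cos(angle), -np.sin(angle)],
                      [np.sin(angle),  np.cos(angle)]])
corners = corners @ rot_mat_T + center
print(corners.shape)                 # (4, 2) -> the four rotated BEV corners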
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
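Before the next record, a quick numeric check of limit_period, one of the helpers defined in the file above; the function body is copied verbatim from the source and the sample angles are arbitrary.

import numpy as np

# limit_period (from the file_content above) wraps a value into
# [-offset * period, (1 - offset) * period); with the defaults that is
# [-pi/2, pi/2), which is what rbbox2d_to_near_bbox uses to decide whether
# a rotated box is closer to "standing" or "lying".
def limit_period(val, offset=0.5, period=np.pi):
    return val - np.floor(val / period + offset) * period

print(limit_period(np.array([0.0, np.pi, -3 * np.pi / 4])))
# -> [0. 0. 0.78539816]: pi and -3*pi/4 both wrap into [-pi/2, pi/2)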
iou_jit
calculate box iou. note that jit version runs 2x faster than cython in my machine! Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points # MASKED: iou_jit function (lines 496-535) @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] 
overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. ---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. 
Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. 
bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
@numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps
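A small sanity check for the implementation above with eps=0.0: a plain-NumPy IoU for two hand-picked boxes, written inline rather than calling the numba-compiled function.

import numpy as np

# Two axis-aligned boxes in (xmin, ymin, xmax, ymax) form. With eps=0.0 the
# implementation above reduces to ordinary intersection-over-union.
boxes = np.array([[0.0, 0.0, 2.0, 2.0]])
query_boxes = np.array([[1.0, 1.0, 3.0, 3.0]])

iw = min(boxes[0, 2], query_boxes[0, 2]) - max(boxes[0, 0], query_boxes[0, 0])
ih = min(boxes[0, 3], query_boxes[0, 3]) - max(boxes[0, 1], query_boxes[0, 1])
inter = max(iw, 0.0) * max(ih, 0.0)
area_b = (boxes[0, 2] - boxes[0, 0]) * (boxes[0, 3] - boxes[0, 1])
area_q = (query_boxes[0, 2] - query_boxes[0, 0]) * (query_boxes[0, 3] - query_boxes[0, 1])
print(inter / (area_b + area_q - inter))   # 0.142857... == 1 / 7
# iou_jit(boxes, query_boxes, eps=0.0) returns the same value in overlaps[0, 0].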
496
535
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) def image_box_region_area(img_cumsum, bbox): """check a 2d voxel is contained by a box. used to filter empty anchors. Summed-area table algorithm: ==> W ------------------ | | | |------A---------B | | | | | | |----- C---------D Iabcd = ID-IB-IC+IA Args: img_cumsum: [M, H, W](yx) cumsumed image. bbox: [N, 4](xyxy) bounding box, """ N = bbox.shape[0] M = img_cumsum.shape[0] ret = np.zeros([N, M], dtype=img_cumsum.dtype) ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]] IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]] IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]] IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]] ret = ID - IB - IC + IA return ret def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = 
np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
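The riou_cc / rinter_cc helpers near the top of the file above use axis-aligned "standup" boxes as a cheap pre-filter before the rotated overlap: if the standup IoU is zero, the rotated boxes cannot overlap either, so the expensive rotated computation can be skipped. An illustrative sketch of that pre-filter, assuming the functions defined in that file are in scope; the box values are hypothetical:

import numpy as np

rbboxes = np.array([[0.0, 0.0, 4.0, 2.0, 0.3]])      # x, y, w, l, yaw (made up)
qrbboxes = np.array([[10.0, 10.0, 4.0, 2.0, -0.5]])

corners = center_to_corner_box2d(rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4])
qcorners = center_to_corner_box2d(qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4])
standup = corner_to_standup_nd(corners)       # [N, 4] xyxy bounds of the corners
qstandup = corner_to_standup_nd(qcorners)

coarse = iou_jit(standup, qstandup, eps=0.0)  # cheap axis-aligned IoU, shape [N, K]
# coarse is all zeros for these two far-apart boxes, so rbbox_iou /
# rbbox_intersection would have nothing left to refine.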
image_box_region_area
compute the summed value of a cumulative-sum (integral) image inside each
2d box; used to filter empty anchors.
Summed-area table algorithm:
==> W
------------------
|      |         |
|------A---------B
|      |         |
|      |         |
|----- C---------D
Iabcd = ID - IB - IC + IA
Args:
    img_cumsum: [M, H, W] (yx) cumulative-sum image.
    bbox: [N, 4] (xyxy) bounding boxes.
Returns:
    [M, N] array of per-box region sums.
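A minimal, self-contained sketch (not part of the dataset record) of the four-corner lookup this docstring describes, assuming an inclusive integral image built with np.cumsum along both axes; the image and coordinates are hypothetical:

import numpy as np

img = np.random.rand(32, 48)                  # H x W occupancy-like image
sat = img.cumsum(axis=0).cumsum(axis=1)       # summed-area table (inclusive)

x0, y0, x1, y1 = 4, 5, 20, 17                 # one xyxy box
region_sum = (
    sat[y1, x1]     # D: bottom-right corner
    - sat[y0, x1]   # strip above the box
    - sat[y1, x0]   # strip left of the box
    + sat[y0, x0]   # A: top-left corner, subtracted twice, added back
)
# With an inclusive cumsum this equals the sum over rows y0+1..y1 and
# columns x0+1..x1, matching the Iabcd = ID - IB - IC + IA identity above.
assert np.isclose(region_sum, img[y0 + 1 : y1 + 1, x0 + 1 : x1 + 1].sum())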
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] if angles is not None: corners = rotation_2d(corners, angles) corners += centers.reshape([-1, 1, 2]) return corners @numba.jit(nopython=True) def box2d_to_corner_jit(boxes): num_box = boxes.shape[0] corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) for i in range(num_box): rot_sin = np.sin(boxes[i, -1]) rot_cos = np.cos(boxes[i, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] return box_corners def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2): return center_to_corner_box3d( rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis ) def rbbox3d_to_bev_corners(rbboxes, origin=0.5): return center_to_corner_box2d( rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin ) def minmax_to_corner_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box2d(center, dims, origin=0.0) def minmax_to_corner_2d_v2(minmax_box): # N, 4 -> N 4 2 return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2) def minmax_to_corner_3d(minmax_box): ndim = minmax_box.shape[-1] // 2 center = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center return center_to_corner_box3d(center, dims, origin=0.0) def minmax_to_center_2d(minmax_box): ndim = minmax_box.shape[-1] // 2 center_min = minmax_box[..., :ndim] dims = minmax_box[..., ndim:] - center_min center = center_min + 0.5 * dims return np.concatenate([center, dims], axis=-1) def center_to_minmax_2d_0_5(centers, dims): return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1) def center_to_minmax_2d(centers, dims, origin=0.5): if origin == 0.5: return center_to_minmax_2d_0_5(centers, dims) corners = center_to_corner_box2d(centers, dims, origin=origin) return corners[:, [0, 2]].reshape([-1, 4]) def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def projection_matrix_to_CRT_kitti(proj): # P = C @ [R|T] # C is upper triangular matrix, so we need to inverse CR and use QR # stable for all kitti camera projection matrix CR = proj[0:3, 0:3] CT = proj[0:3, 3] RinvCinv = np.linalg.inv(CR) Rinv, Cinv = np.linalg.qr(RinvCinv) C = np.linalg.inv(Cinv) R = np.linalg.inv(Rinv) T = Cinv @ CT return C, R, T def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] b = bbox_image box_corners = np.array( [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype ) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=1) return ret_xyz def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100): fku = C[0, 0] fkv = -C[1, 1] u0v0 = C[0:2, 2] num_box = bboxes.shape[0] z_points = 
np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[ np.newaxis, :, np.newaxis ] z_points = np.tile(z_points, [num_box, 1, 1]) box_corners = minmax_to_corner_2d_v2(bboxes) near_box_corners = (box_corners - u0v0) / np.array( [fku / near_clip, -fkv / near_clip], dtype=C.dtype ) far_box_corners = (box_corners - u0v0) / np.array( [fku / far_clip, -fkv / far_clip], dtype=C.dtype ) ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2] ret_xyz = np.concatenate([ret_xy, z_points], axis=-1) return ret_xyz @numba.njit def _add_rgb_to_points_kernel(points_2d, image, points_rgb): num_points = points_2d.shape[0] image_h, image_w = image.shape[:2] for i in range(num_points): img_pos = np.floor(points_2d[i]).astype(np.int32) if img_pos[0] >= 0 and img_pos[0] < image_w: if img_pos[1] >= 0 and img_pos[1] < image_h: points_rgb[i, :] = image[img_pos[1], img_pos[0], :] # image[img_pos[1], img_pos[0]] = 0 def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]): kernel = np.ones(mean_size, np.float32) / np.prod(mean_size) # image = cv2.filter2D(image, -1, kernel) points_cam = lidar_to_camera(points[:, :3], rect, Trv2c) points_2d = project_to_image(points_cam, P2) points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype) _add_rgb_to_points_kernel(points_2d, image, points_rgb) return points_rgb def project_to_image(points_3d, proj_mat): points_shape = list(points_3d.shape) points_shape[-1] = 1 points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) point_2d = points_4 @ proj_mat.T point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] return point_2d_res def camera_to_lidar(points, r_rect, velo2cam): points_shape = list(points.shape[0:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T) return lidar_points[..., :3] def lidar_to_camera(points, r_rect, velo2cam): points_shape = list(points.shape[:-1]) if points.shape[-1] == 3: points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1) camera_points = points @ (r_rect @ velo2cam).T return camera_points[..., :3] def box_camera_to_lidar(data, r_rect, velo2cam): xyz = data[:, 0:3] l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam) return np.concatenate([xyz_lidar, w, l, h, r], axis=1) def box_lidar_to_camera(data, r_rect, velo2cam): xyz_lidar = data[:, 0:3] w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6] r = data[:, 6:7] xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam) return np.concatenate([xyz, l, h, w, r], axis=1) def remove_outside_points(points, rect, Trv2c, P2, image_shape): # 5x faster than remove_outside_points_v1(2ms vs 10ms) C, R, T = projection_matrix_to_CRT_kitti(P2) image_bbox = [0, 0, image_shape[1], image_shape[0]] frustum = get_frustum(image_bbox, C) frustum -= T frustum = np.linalg.inv(R) @ frustum.T frustum = camera_to_lidar(frustum.T, rect, Trv2c) frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) points = points[indices.reshape([-1])] return points @numba.jit(nopython=True) def iou_jit(boxes, query_boxes, eps=1.0): """calculate box iou. note that jit version runs 2x faster than cython in my machine! 
Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * ( query_boxes[k, 3] - query_boxes[k, 1] + eps ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + eps ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + eps ) if ih > 0: ua = ( (boxes[n, 2] - boxes[n, 0] + eps) * (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps @numba.jit(nopython=True) def iou_3d_jit(boxes, query_boxes, add1=True): """calculate box iou3d, ---------- boxes: (N, 6) ndarray of float query_boxes: (K, 6) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 for k in range(K): box_area = ( (query_boxes[k, 3] - query_boxes[k, 0] + add1) * (query_boxes[k, 4] - query_boxes[k, 1] + add1) * (query_boxes[k, 5] - query_boxes[k, 2] + add1) ) for n in range(N): iw = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 0], query_boxes[k, 0]) + add1 ) if iw > 0: ih = ( min(boxes[n, 4], query_boxes[k, 4]) - max(boxes[n, 1], query_boxes[k, 1]) + add1 ) if ih > 0: il = ( min(boxes[n, 5], query_boxes[k, 5]) - max(boxes[n, 2], query_boxes[k, 2]) + add1 ) if il > 0: ua = float( (boxes[n, 3] - boxes[n, 0] + add1) * (boxes[n, 4] - boxes[n, 1] + add1) * (boxes[n, 5] - boxes[n, 2] + add1) + box_area - iw * ih * il ) overlaps[n, k] = iw * ih * il / ua return overlaps @numba.jit(nopython=True) def iou_nd_jit(boxes, query_boxes, add1=True): """calculate box iou nd, 2x slower than iou_jit. 
---------- boxes: (N, ndim * 2) ndarray of float query_boxes: (K, ndim * 2) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] ndim = boxes.shape[1] // 2 overlaps = np.zeros((N, K), dtype=boxes.dtype) side_lengths = np.zeros((ndim,), dtype=boxes.dtype) if add1: add1 = 1.0 else: add1 = 0.0 invalid = False for k in range(K): qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1 for i in range(1, ndim): qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1 for n in range(N): invalid = False for i in range(ndim): side_length = ( min(boxes[n, i + ndim], query_boxes[k, i + ndim]) - max(boxes[n, i], query_boxes[k, i]) + add1 ) if side_length <= 0: invalid = True break side_lengths[i] = side_length if not invalid: box_area = boxes[n, ndim] - boxes[n, 0] + add1 for i in range(1, ndim): box_area *= boxes[n, ndim + i] - boxes[n, i] + add1 inter = side_lengths[0] for i in range(1, ndim): inter *= side_lengths[i] # inter = np.prod(side_lengths) ua = float(box_area + qbox_area - inter) overlaps[n, k] = inter / ua return overlaps def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) return indices def corner_to_surfaces_3d(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module surfaces = np.array( [ [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], ] ).transpose([2, 0, 1, 3]) return surfaces @numba.jit(nopython=True) def corner_to_surfaces_3d_jit(corners): """convert 3d box corners from corner function above to surfaces that normal vectors all direct to internal. Args: corners (float array, [N, 8, 3]): 3d box corners. Returns: surfaces (float array, [N, 6, 4, 3]): """ # box_corners: [N, 8, 3], must from corner functions in this module num_boxes = corners.shape[0] surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) corner_idxes = np.array( [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] ).reshape(6, 4) for i in range(num_boxes): for j in range(6): for k in range(4): surfaces[i, j, k] = corners[i, corner_idxes[j, k]] return surfaces def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. 
""" voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_centers = voxel_origins + voxel_size * 0.5 gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3] - voxel_size * 0.5, gt_boxes[:, 3:6] + voxel_size, gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces) return np.any(ret, axis=1).astype(np.int64) def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range): """assign a 0/1 label to each voxel based on whether the center of voxel is in gt_box. LIDAR. """ voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype) coors_range = np.array(coors_range, dtype=gt_boxes.dtype) shift = coors_range[:3] voxel_origins = coors[:, ::-1] * voxel_size + shift voxel_maxes = voxel_origins + voxel_size voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1) voxel_corners = minmax_to_corner_3d(voxel_minmax) gt_box_corners = center_to_corner_box3d( gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2, ) gt_surfaces = corner_to_surfaces_3d(gt_box_corners) voxel_corners_flat = voxel_corners.reshape([-1, 3]) ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces) ret = ret.reshape([-1, 8, ret.shape[-1]]) return ret.any(-1).any(-1).astype(np.int64) # MASKED: image_box_region_area function (lines 742-766) def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6): x_vsize = voxel_size[0] y_vsize = voxel_size[1] max_x = points[:, 0].max() max_y = points[:, 1].max() min_x = points[:, 0].min() min_y = points[:, 1].min() max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample) max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample) min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample) min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample) max_x = np.minimum(max_x + margin, bound[2]) max_y = np.minimum(max_y + margin, bound[3]) min_x = np.maximum(min_x - margin, bound[0]) min_y = np.maximum(min_y - margin, bound[1]) return np.array([min_x, min_y, max_x, max_y]) def box3d_to_bbox(box3d, rect, Trv2c, P2): box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c) box_corners = center_to_corner_box3d( box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1 ) box_corners_in_image = project_to_image(box_corners, P2) # box_corners_in_image: [N, 8, 2] minxy = np.min(box_corners_in_image, axis=1) maxxy = np.max(box_corners_in_image, axis=1) bbox = np.concatenate([minxy, maxxy], axis=1) return bbox def change_box3d_center_(box3d, src, dst): dst = np.array(dst, dtype=box3d.dtype) src = np.array(src, dtype=box3d.dtype) box3d[..., :3] += box3d[..., 3:6] * (dst - src) def encode_parts(relative_shifts): parts = np.zeros((len(relative_shifts),), dtype=np.int32) mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 0 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0) parts[mask] = 1 mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0) parts[mask] = 2 mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0) parts[mask] = 3 return parts
def image_box_region_area(img_cumsum, bbox):
    """compute the summed value of a cumulative-sum (integral) image inside each
    2d box; used to filter empty anchors.
    Summed-area table algorithm:
    ==> W
    ------------------
    |      |         |
    |------A---------B
    |      |         |
    |      |         |
    |----- C---------D
    Iabcd = ID - IB - IC + IA
    Args:
        img_cumsum: [M, H, W] (yx) cumulative-sum image.
        bbox: [N, 4] (xyxy) bounding boxes.
    Returns:
        [M, N] array of per-box region sums.
    """
    # Four-corner lookup on the integral image; fancy indexing with the
    # (N,)-shaped corner coordinates broadcasts against the M maps, so each
    # term below has shape [M, N].
    ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
    IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
    IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
    IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
    ret = ID - IB - IC + IA
    return ret
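A hypothetical usage sketch for the implementation above (the occupancy maps and boxes are made up): build the cumulative-sum image per map, then read off one region sum per (map, box) pair.

import numpy as np

occupancy = (np.random.rand(2, 64, 64) > 0.5).astype(np.float32)   # [M, H, W]
img_cumsum = occupancy.cumsum(axis=1).cumsum(axis=2)                # per-map SAT
bbox = np.array(
    [[4, 4, 20, 20], [10, 8, 30, 40], [0, 0, 63, 63]], dtype=np.int64
)                                                                   # [N, 4] xyxy

areas = image_box_region_area(img_cumsum, bbox)
print(areas.shape)  # (2, 3): one summed value per map and per box
# Entries that come out as zero mark anchors whose region is empty, which is
# the filtering use case the docstring mentions.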
742
766
from pathlib import Path import numba import numpy as np from det3d.core.bbox.geometry import ( points_count_convex_polygon_3d_jit, points_in_convex_polygon_3d_jit, ) try: from spconv.utils import rbbox_intersection, rbbox_iou except: print("Import spconv fail, no support for sparse convolution!") def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)): rbbox_corners = center_to_corner_box3d( rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis ) surfaces = corner_to_surfaces_3d(rbbox_corners) return points_count_convex_polygon_3d_jit(points[:, :3], surfaces) def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh) def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0): # less than 50ms when used in second one thread. 10x slower than gpu boxes_corners = center_to_corner_box2d( rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4] ) boxes_standup = corner_to_standup_nd(boxes_corners) qboxes_corners = center_to_corner_box2d( qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4] ) qboxes_standup = corner_to_standup_nd(qboxes_corners) # if standup box not overlapped, rbbox not overlapped too. standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0) return rbbox_intersection( boxes_corners, qboxes_corners, standup_iou, standup_thresh ) def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) corners_norm = np.stack( np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1 ).astype(dims.dtype) # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 # so need to convert to a format which is convenient to do other computing. # for 2d boxes, format is clockwise start with minimum point # for 3d boxes, please draw lines by your hand. 
if ndim == 2: # generate clockwise box corners corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim]) return corners @numba.njit def corners_2d_jit(dims, origin=0.5): ndim = 2 corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype) corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corners_3d_jit(dims, origin=0.5): ndim = 3 corners_norm = np.array( [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype, ).reshape((8, 3)) corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim)) return corners @numba.njit def corner_to_standup_nd_jit(boxes_corner): num_boxes = boxes_corner.shape[0] ndim = boxes_corner.shape[-1] result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) for i in range(num_boxes): for j in range(ndim): result[i, j] = np.min(boxes_corner[i, :, j]) for j in range(ndim): result[i, j + ndim] = np.max(boxes_corner[i, :, j]) return result def corner_to_standup_nd(boxes_corner): assert len(boxes_corner.shape) == 3 standup_boxes = [] standup_boxes.append(np.min(boxes_corner, axis=1)) standup_boxes.append(np.max(boxes_corner, axis=1)) return np.concatenate(standup_boxes, -1) def rbbox2d_to_near_bbox(rbboxes): """convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes Returns: bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes """ rots = rbboxes[..., -1] rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi)) cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis] bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4]) bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:]) return bboxes def rotation_3d_in_axis(points, angles, axis=0): # points: [N, point_size, 3] rot_sin = np.sin(angles) rot_cos = np.cos(angles) ones = np.ones_like(rot_cos) zeros = np.zeros_like(rot_cos) if axis == 1: rot_mat_T = np.stack( [ [rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos], ] ) elif axis == 2 or axis == -1: rot_mat_T = np.stack( [ [rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones], ] ) elif axis == 0: rot_mat_T = np.stack( [ [zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros], ] ) else: raise ValueError("axis should in range") return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_points_single_angle(points, angle, axis=0): # points: [N, 3] rot_sin = np.sin(angle) rot_cos = np.cos(angle) if axis == 1: rot_mat_T = np.array( [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], dtype=points.dtype, ) elif axis == 2 or axis == -1: rot_mat_T = np.array( [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], dtype=points.dtype, ) elif axis == 0: rot_mat_T = np.array( [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], dtype=points.dtype, ) else: raise ValueError("axis should in range") return points @ rot_mat_T def rotation_2d(points, angles): """rotation 2d points based on origin point clockwise when angle positive. 
Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angles (float array, shape=[N]): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angles) rot_cos = np.cos(angles) rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) return np.einsum("aij,jka->aik", points, rot_mat_T) def rotation_box(box_corners, angle): """rotation 2d points based on origin point clockwise when angle positive. Args: points (float array, shape=[N, point_size, 2]): points to be rotated. angle (float): rotation angle. Returns: float array: same shape as points """ rot_sin = np.sin(angle) rot_cos = np.cos(angle) rot_mat_T = np.array( [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype ) return box_corners @ rot_mat_T def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2): """convert kitti locations, dimensions and angles to corners Args: centers (float array, shape=[N, 3]): locations in kitti label file. dims (float array, shape=[N, 3]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. origin (list or array or float): origin point relate to smallest point. use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar. axis (int): rotation axis. 1 for camera and 2 for lidar. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 8, 3] if angles is not None: corners = rotation_3d_in_axis(corners, angles, axis=axis) corners += centers.reshape([-1, 1, 3]) return corners def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): """convert kitti locations, dimensions and angles to corners. format: center(xy), dims(xy), angles(clockwise when positive) Args: centers (float array, shape=[N, 2]): locations in kitti label file. dims (float array, shape=[N, 2]): dimensions in kitti label file. angles (float array, shape=[N]): rotation_y in kitti label file. Returns: [type]: [description] """ # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. 
    corners = corners_nd(dims, origin=origin)
    # corners: [N, 4, 2]
    if angles is not None:
        corners = rotation_2d(corners, angles)
    corners += centers.reshape([-1, 1, 2])
    return corners


@numba.jit(nopython=True)
def box2d_to_corner_jit(boxes):
    num_box = boxes.shape[0]
    corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
    corners_norm[1, 1] = 1.0
    corners_norm[2] = 1.0
    corners_norm[3, 0] = 1.0
    corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
    corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
    for i in range(num_box):
        rot_sin = np.sin(boxes[i, -1])
        rot_cos = np.cos(boxes[i, -1])
        rot_mat_T[0, 0] = rot_cos
        rot_mat_T[0, 1] = -rot_sin
        rot_mat_T[1, 0] = rot_sin
        rot_mat_T[1, 1] = rot_cos
        box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
    return box_corners


def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2):
    return center_to_corner_box3d(
        rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis
    )


def rbbox3d_to_bev_corners(rbboxes, origin=0.5):
    return center_to_corner_box2d(
        rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin
    )


def minmax_to_corner_2d(minmax_box):
    ndim = minmax_box.shape[-1] // 2
    center = minmax_box[..., :ndim]
    dims = minmax_box[..., ndim:] - center
    return center_to_corner_box2d(center, dims, origin=0.0)


def minmax_to_corner_2d_v2(minmax_box):
    # N, 4 -> N, 4, 2
    return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2)


def minmax_to_corner_3d(minmax_box):
    ndim = minmax_box.shape[-1] // 2
    center = minmax_box[..., :ndim]
    dims = minmax_box[..., ndim:] - center
    return center_to_corner_box3d(center, dims, origin=0.0)


def minmax_to_center_2d(minmax_box):
    ndim = minmax_box.shape[-1] // 2
    center_min = minmax_box[..., :ndim]
    dims = minmax_box[..., ndim:] - center_min
    center = center_min + 0.5 * dims
    return np.concatenate([center, dims], axis=-1)


def center_to_minmax_2d_0_5(centers, dims):
    return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1)


def center_to_minmax_2d(centers, dims, origin=0.5):
    if origin == 0.5:
        return center_to_minmax_2d_0_5(centers, dims)
    corners = center_to_corner_box2d(centers, dims, origin=origin)
    return corners[:, [0, 2]].reshape([-1, 4])


def limit_period(val, offset=0.5, period=np.pi):
    return val - np.floor(val / period + offset) * period


def projection_matrix_to_CRT_kitti(proj):
    # P = C @ [R|T]
    # C is an upper triangular matrix, so we invert CR and use a QR
    # decomposition; this is stable for all KITTI camera projection matrices.
    CR = proj[0:3, 0:3]
    CT = proj[0:3, 3]
    RinvCinv = np.linalg.inv(CR)
    Rinv, Cinv = np.linalg.qr(RinvCinv)
    C = np.linalg.inv(Cinv)
    R = np.linalg.inv(Rinv)
    T = Cinv @ CT
    return C, R, T


def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
    fku = C[0, 0]
    fkv = -C[1, 1]
    u0v0 = C[0:2, 2]
    z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]
    b = bbox_image
    box_corners = np.array(
        [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype
    )
    near_box_corners = (box_corners - u0v0) / np.array(
        [fku / near_clip, -fkv / near_clip], dtype=C.dtype
    )
    far_box_corners = (box_corners - u0v0) / np.array(
        [fku / far_clip, -fkv / far_clip], dtype=C.dtype
    )
    ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0)  # [8, 2]
    ret_xyz = np.concatenate([ret_xy, z_points], axis=1)
    return ret_xyz


def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100):
    fku = C[0, 0]
    fkv = -C[1, 1]
    u0v0 = C[0:2, 2]
    num_box = bboxes.shape[0]
    z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[
        np.newaxis, :, np.newaxis
    ]
    z_points = np.tile(z_points, [num_box, 1, 1])
    box_corners = minmax_to_corner_2d_v2(bboxes)
    near_box_corners = (box_corners - u0v0) / np.array(
        [fku / near_clip, -fkv / near_clip], dtype=C.dtype
    )
    far_box_corners = (box_corners - u0v0) / np.array(
        [fku / far_clip, -fkv / far_clip], dtype=C.dtype
    )
    ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1)  # [num_box, 8, 2]
    ret_xyz = np.concatenate([ret_xy, z_points], axis=-1)
    return ret_xyz


@numba.njit
def _add_rgb_to_points_kernel(points_2d, image, points_rgb):
    num_points = points_2d.shape[0]
    image_h, image_w = image.shape[:2]
    for i in range(num_points):
        img_pos = np.floor(points_2d[i]).astype(np.int32)
        if img_pos[0] >= 0 and img_pos[0] < image_w:
            if img_pos[1] >= 0 and img_pos[1] < image_h:
                points_rgb[i, :] = image[img_pos[1], img_pos[0], :]
                # image[img_pos[1], img_pos[0]] = 0


def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]):
    kernel = np.ones(mean_size, np.float32) / np.prod(mean_size)
    # image = cv2.filter2D(image, -1, kernel)
    points_cam = lidar_to_camera(points[:, :3], rect, Trv2c)
    points_2d = project_to_image(points_cam, P2)
    points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype)
    _add_rgb_to_points_kernel(points_2d, image, points_rgb)
    return points_rgb


def project_to_image(points_3d, proj_mat):
    points_shape = list(points_3d.shape)
    points_shape[-1] = 1
    points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1)
    point_2d = points_4 @ proj_mat.T
    point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
    return point_2d_res


def camera_to_lidar(points, r_rect, velo2cam):
    points_shape = list(points.shape[0:-1])
    if points.shape[-1] == 3:
        points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
    lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T)
    return lidar_points[..., :3]


def lidar_to_camera(points, r_rect, velo2cam):
    points_shape = list(points.shape[:-1])
    if points.shape[-1] == 3:
        points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
    camera_points = points @ (r_rect @ velo2cam).T
    return camera_points[..., :3]


def box_camera_to_lidar(data, r_rect, velo2cam):
    xyz = data[:, 0:3]
    l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
    r = data[:, 6:7]
    xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)
    return np.concatenate([xyz_lidar, w, l, h, r], axis=1)


def box_lidar_to_camera(data, r_rect, velo2cam):
    xyz_lidar = data[:, 0:3]
    w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]
    r = data[:, 6:7]
    xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)
    return np.concatenate([xyz, l, h, w, r], axis=1)


def remove_outside_points(points, rect, Trv2c, P2, image_shape):
    # 5x faster than remove_outside_points_v1 (2ms vs 10ms)
    C, R, T = projection_matrix_to_CRT_kitti(P2)
    image_bbox = [0, 0, image_shape[1], image_shape[0]]
    frustum = get_frustum(image_bbox, C)
    frustum -= T
    frustum = np.linalg.inv(R) @ frustum.T
    frustum = camera_to_lidar(frustum.T, rect, Trv2c)
    frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])
    indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)
    points = points[indices.reshape([-1])]
    return points


@numba.jit(nopython=True)
def iou_jit(boxes, query_boxes, eps=1.0):
    """calculate box iou. note that the jit version runs 2x faster than
    cython on my machine!

    Parameters
    ----------
    boxes: (N, 4) ndarray of float
    query_boxes: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    for k in range(K):
        box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * (
            query_boxes[k, 3] - query_boxes[k, 1] + eps
        )
        for n in range(N):
            iw = (
                min(boxes[n, 2], query_boxes[k, 2])
                - max(boxes[n, 0], query_boxes[k, 0])
                + eps
            )
            if iw > 0:
                ih = (
                    min(boxes[n, 3], query_boxes[k, 3])
                    - max(boxes[n, 1], query_boxes[k, 1])
                    + eps
                )
                if ih > 0:
                    ua = (
                        (boxes[n, 2] - boxes[n, 0] + eps)
                        * (boxes[n, 3] - boxes[n, 1] + eps)
                        + box_area
                        - iw * ih
                    )
                    overlaps[n, k] = iw * ih / ua
    return overlaps


@numba.jit(nopython=True)
def iou_3d_jit(boxes, query_boxes, add1=True):
    """calculate box iou3d.

    Parameters
    ----------
    boxes: (N, 6) ndarray of float
    query_boxes: (K, 6) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    if add1:
        add1 = 1.0
    else:
        add1 = 0.0
    for k in range(K):
        box_area = (
            (query_boxes[k, 3] - query_boxes[k, 0] + add1)
            * (query_boxes[k, 4] - query_boxes[k, 1] + add1)
            * (query_boxes[k, 5] - query_boxes[k, 2] + add1)
        )
        for n in range(N):
            iw = (
                min(boxes[n, 3], query_boxes[k, 3])
                - max(boxes[n, 0], query_boxes[k, 0])
                + add1
            )
            if iw > 0:
                ih = (
                    min(boxes[n, 4], query_boxes[k, 4])
                    - max(boxes[n, 1], query_boxes[k, 1])
                    + add1
                )
                if ih > 0:
                    il = (
                        min(boxes[n, 5], query_boxes[k, 5])
                        - max(boxes[n, 2], query_boxes[k, 2])
                        + add1
                    )
                    if il > 0:
                        ua = float(
                            (boxes[n, 3] - boxes[n, 0] + add1)
                            * (boxes[n, 4] - boxes[n, 1] + add1)
                            * (boxes[n, 5] - boxes[n, 2] + add1)
                            + box_area
                            - iw * ih * il
                        )
                        overlaps[n, k] = iw * ih * il / ua
    return overlaps


@numba.jit(nopython=True)
def iou_nd_jit(boxes, query_boxes, add1=True):
    """calculate box iou nd, 2x slower than iou_jit.

    Parameters
    ----------
    boxes: (N, ndim * 2) ndarray of float
    query_boxes: (K, ndim * 2) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    ndim = boxes.shape[1] // 2
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    side_lengths = np.zeros((ndim,), dtype=boxes.dtype)
    if add1:
        add1 = 1.0
    else:
        add1 = 0.0
    invalid = False
    for k in range(K):
        qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1
        for i in range(1, ndim):
            qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1
        for n in range(N):
            invalid = False
            for i in range(ndim):
                side_length = (
                    min(boxes[n, i + ndim], query_boxes[k, i + ndim])
                    - max(boxes[n, i], query_boxes[k, i])
                    + add1
                )
                if side_length <= 0:
                    invalid = True
                    break
                side_lengths[i] = side_length
            if not invalid:
                box_area = boxes[n, ndim] - boxes[n, 0] + add1
                for i in range(1, ndim):
                    box_area *= boxes[n, ndim + i] - boxes[n, i] + add1
                inter = side_lengths[0]
                for i in range(1, ndim):
                    inter *= side_lengths[i]
                # inter = np.prod(side_lengths)
                ua = float(box_area + qbox_area - inter)
                overlaps[n, k] = inter / ua
    return overlaps


def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
    rbbox_corners = center_to_corner_box3d(
        rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
    )
    surfaces = corner_to_surfaces_3d(rbbox_corners)
    indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
    return indices


def corner_to_surfaces_3d(corners):
    """convert 3d box corners from corner function above to surfaces that
    normal vectors all direct to internal.

    Args:
        corners (float array, [N, 8, 3]): 3d box corners.
    Returns:
        surfaces (float array, [N, 6, 4, 3]):
    """
    # box_corners: [N, 8, 3], must from corner functions in this module
    surfaces = np.array(
        [
            [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]],
            [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]],
            [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]],
            [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]],
            [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]],
            [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]],
        ]
    ).transpose([2, 0, 1, 3])
    return surfaces


@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
    """convert 3d box corners from corner function above to surfaces that
    normal vectors all direct to internal.

    Args:
        corners (float array, [N, 8, 3]): 3d box corners.
    Returns:
        surfaces (float array, [N, 6, 4, 3]):
    """
    # box_corners: [N, 8, 3], must from corner functions in this module
    num_boxes = corners.shape[0]
    surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
    corner_idxes = np.array(
        [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]
    ).reshape(6, 4)
    for i in range(num_boxes):
        for j in range(6):
            for k in range(4):
                surfaces[i, j, k] = corners[i, corner_idxes[j, k]]
    return surfaces


def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):
    """assign a 0/1 label to each voxel based on whether
    the center of the voxel is in a gt_box. LIDAR.
    """
    voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
    coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
    shift = coors_range[:3]
    voxel_origins = coors[:, ::-1] * voxel_size + shift
    voxel_centers = voxel_origins + voxel_size * 0.5
    gt_box_corners = center_to_corner_box3d(
        gt_boxes[:, :3] - voxel_size * 0.5,
        gt_boxes[:, 3:6] + voxel_size,
        gt_boxes[:, 6],
        origin=[0.5, 0.5, 0.5],
        axis=2,
    )
    gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
    ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)
    return np.any(ret, axis=1).astype(np.int64)


def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
    """assign a 0/1 label to each voxel based on whether
    the center of the voxel is in a gt_box. LIDAR.
    """
    voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
    coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
    shift = coors_range[:3]
    voxel_origins = coors[:, ::-1] * voxel_size + shift
    voxel_maxes = voxel_origins + voxel_size
    voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1)
    voxel_corners = minmax_to_corner_3d(voxel_minmax)
    gt_box_corners = center_to_corner_box3d(
        gt_boxes[:, :3],
        gt_boxes[:, 3:6],
        gt_boxes[:, 6],
        origin=[0.5, 0.5, 0.5],
        axis=2,
    )
    gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
    voxel_corners_flat = voxel_corners.reshape([-1, 3])
    ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
    ret = ret.reshape([-1, 8, ret.shape[-1]])
    return ret.any(-1).any(-1).astype(np.int64)


def image_box_region_area(img_cumsum, bbox):
    """check a 2d voxel is contained by a box. used to filter empty
    anchors. Summed-area table algorithm:
    ==> W
    ------------------
    |      |         |
    |------A---------B
    |      |         |
    |      |         |
    |----- C---------D
    Iabcd = ID - IB - IC + IA

    Args:
        img_cumsum: [M, H, W](yx) cumsumed image.
        bbox: [N, 4](xyxy) bounding box,
    """
    N = bbox.shape[0]
    M = img_cumsum.shape[0]
    ret = np.zeros([N, M], dtype=img_cumsum.dtype)
    ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
    IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
    IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
    IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
    ret = ID - IB - IC + IA
    return ret


def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6):
    x_vsize = voxel_size[0]
    y_vsize = voxel_size[1]
    max_x = points[:, 0].max()
    max_y = points[:, 1].max()
    min_x = points[:, 0].min()
    min_y = points[:, 1].min()
    max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample)
    max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample)
    min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample)
    min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample)
    max_x = np.minimum(max_x + margin, bound[2])
    max_y = np.minimum(max_y + margin, bound[3])
    min_x = np.maximum(min_x - margin, bound[0])
    min_y = np.maximum(min_y - margin, bound[1])
    return np.array([min_x, min_y, max_x, max_y])


def box3d_to_bbox(box3d, rect, Trv2c, P2):
    # project the camera-frame box, not the raw lidar-frame box, with P2
    box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c)
    box_corners = center_to_corner_box3d(
        box3d_to_cam[:, :3],
        box3d_to_cam[:, 3:6],
        box3d_to_cam[:, 6],
        [0.5, 1.0, 0.5],
        axis=1,
    )
    box_corners_in_image = project_to_image(box_corners, P2)
    # box_corners_in_image: [N, 8, 2]
    minxy = np.min(box_corners_in_image, axis=1)
    maxxy = np.max(box_corners_in_image, axis=1)
    bbox = np.concatenate([minxy, maxxy], axis=1)
    return bbox


def change_box3d_center_(box3d, src, dst):
    dst = np.array(dst, dtype=box3d.dtype)
    src = np.array(src, dtype=box3d.dtype)
    box3d[..., :3] += box3d[..., 3:6] * (dst - src)


def encode_parts(relative_shifts):
    # quadrant label (0-3) from the signs of the x/y shifts
    parts = np.zeros((len(relative_shifts),), dtype=np.int32)
    mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0)
    parts[mask] = 0
    mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0)
    parts[mask] = 1
    mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0)
    parts[mask] = 2
    mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0)
    parts[mask] = 3
    return parts
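As a quick sanity check on the +eps convention in iou_jit above, here is a minimal usage sketch with hand-verifiable numbers. The import name box_np_ops is an assumption for illustration; the source does not state this module's import path.

# Hand-checkable iou_jit example (box_np_ops is an assumed module name).
import numpy as np
import box_np_ops

boxes = np.array([[0.0, 0.0, 10.0, 10.0]])
query = np.array([[5.0, 5.0, 15.0, 15.0]])

# Default eps=1.0 (pixel-count convention):
# intersection = 6 * 6 = 36, union = 121 + 121 - 36 = 206 -> IoU ~= 0.1748
print(box_np_ops.iou_jit(boxes, query))

# eps=0.0 (continuous-coordinate convention):
# intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175 -> IoU ~= 0.1429
print(box_np_ops.iou_jit(boxes, query, eps=0.0))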
parse_command_args
This parses the arguments and returns a tuple containing:

(args, command, command_args)

For example, "--config=bar start --with=baz" would return:

(['--config=bar'], 'start', ['--with=baz'])
""" logan.runner ~~~~~~~~~~~~ :copyright: (c) 2012 David Cramer. :license: Apache License 2.0, see NOTICE for more details. """ import argparse import os import re import sys from django.core import management from nautobot import __version__ from . import importer from .settings import create_default_settings __configured = False def sanitize_name(project): project = project.replace(" ", "-") return re.sub("[^A-Z0-9a-z_-]", "-", project) # MASKED: parse_command_args function (lines 29-49) def is_configured(): global __configured return __configured def configure_app( config_path=None, project=None, default_config_path=None, default_settings=None, settings_initializer=None, settings_envvar=None, initializer=None, allow_extras=True, config_module_name=None, runner_name=None, on_configure=None, ): """ :param project: should represent the canonical name for the project, generally the same name it assigned in distutils. :param default_config_path: the default location for the configuration file. :param default_settings: default settings to load (think inheritence). :param settings_initializer: a callback function which should return a string representing the default settings template to generate. :param initializer: a callback function which will be executed before the command is executed. It is passed a dictionary of various configuration attributes. """ global __configured project_filename = sanitize_name(project) if default_config_path is None: default_config_path = "~/%s/%s.conf.py" % (project_filename, project_filename) if settings_envvar is None: settings_envvar = project_filename.upper() + "_CONF" if config_module_name is None: config_module_name = project_filename + "_config" # normalize path if settings_envvar in os.environ: default_config_path = os.environ.get(settings_envvar) else: default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path))) if not config_path: config_path = default_config_path config_path = os.path.expanduser(config_path) if not os.path.exists(config_path): if runner_name: raise ValueError( "Configuration file does not exist. Use '%s init' to initialize the file." 
% (runner_name,) ) raise ValueError("Configuration file does not exist at %r" % (config_path,)) os.environ["DJANGO_SETTINGS_MODULE"] = config_module_name def settings_callback(settings): if initializer is None: return try: initializer( { "project": project, "config_path": config_path, "settings": settings, } ) except Exception: # XXX: Django doesn't like various errors in this path import sys import traceback traceback.print_exc() sys.exit(1) importer.install( config_module_name, config_path, default_settings, allow_extras=allow_extras, callback=settings_callback, ) __configured = True # HACK(dcramer): we need to force access of django.conf.settings to # ensure we don't hit any import-driven recursive behavior from django.conf import settings hasattr(settings, "INSTALLED_APPS") if on_configure: on_configure( { "project": project, "config_path": config_path, "settings": settings, } ) class VerboseHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): """Argparse Formatter that includes newlines and shows argument defaults.""" def run_app(**kwargs): sys_args = sys.argv # The established command for running this program runner_name = os.path.basename(sys_args[0]) default_config_path = kwargs.get("default_config_path") # Primary parser parser = management.CommandParser( description=kwargs.pop("description"), formatter_class=VerboseHelpFormatter, add_help=False, ) parser.add_argument( "-c", "--config", metavar="CONFIG", help="Path to the configuration file", ) parser.add_argument( "--version", action="version", version=__version__, ) # This block of code here is done in this way because of the built in Django # management command parsing not playing well unless you have a Django # config with SECRET_KEY defined. # Parse out `--config` here first capturing any unparsed args for passing to # Django parser. args, unparsed_args = parser.parse_known_args() # Now add the sub-parser for "init" command subparsers = parser.add_subparsers(help=False, dest="command", metavar="") init_parser = subparsers.add_parser( "init", help="Initialize a new configuration", ) init_parser.add_argument( "config_path", default=default_config_path, nargs="?", help="Path to output generated configuration file", ) # Try to use our parser first, to process custom arguments try: args = parser.parse_args() command = args.command command_args = sys.argv[1:] # Fallback to passing through to Django management commands # except RuntimeError as err: except management.CommandError as err: if "invalid choice" not in str(err): raise # Rewrite sys_args to have the unparsed args (if any) sys_args = sys_args[:1] + unparsed_args _, command, command_args = parse_command_args(sys_args[1:]) # If we don't get a command of some sort, print help and exit dirty if not command: parser.print_help() parser.exit(1) # The `init` command is reserved for initializing configuration if command == "init": settings_initializer = kwargs.get("settings_initializer") config_path = os.path.expanduser(args.config_path) # Check if the config already exists; alert user and exit if exists. if os.path.exists(config_path): print( f"A configuration already exists at {config_path}. Please backup and remove it or choose another path." 
) return # Create the config try: create_default_settings(config_path, settings_initializer) except OSError as e: raise e.__class__("Unable to write default settings file to %r" % config_path) print("Configuration file created at %r" % config_path) return # Fetch config path from `--config` if provided, otherwise we want it to # default to None so that the underlying machinery in `configure_app` will # process default path or environment variable. config_path = args.config # Overlay our config w/ defautls try: configure_app(config_path=config_path, **kwargs) except ValueError as err: parser.exit(status=2, message=str(err) + "\n") # Call Django management command management.execute_from_command_line([runner_name, command] + command_args) # Exit cleanly sys.exit(0) if __name__ == "__main__": run_app()
def parse_command_args(args):
    """
    This parses the arguments and returns a tuple containing:

    (args, command, command_args)

    For example, "--config=bar start --with=baz" would return:

    (['--config=bar'], 'start', ['--with=baz'])
    """
    index = None
    for arg_i, arg in enumerate(args):
        if not arg.startswith("-"):
            index = arg_i
            break

    # Unable to parse any arguments
    if index is None:
        return (args, None, [])

    return (args[:index], args[index], args[(index + 1) :])
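A short usage sketch reproducing the docstring example; the import path logan.runner is inferred from the module docstring above and is an assumption, not stated import guidance.

# Usage sketch for parse_command_args (logan.runner is an assumed import path).
from logan.runner import parse_command_args

args, command, command_args = parse_command_args(["--config=bar", "start", "--with=baz"])
assert args == ["--config=bar"]
assert command == "start"
assert command_args == ["--with=baz"]

# With no positional token, no command is found:
assert parse_command_args(["--config=bar"]) == (["--config=bar"], None, [])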
29
49
""" logan.runner ~~~~~~~~~~~~ :copyright: (c) 2012 David Cramer. :license: Apache License 2.0, see NOTICE for more details. """ import argparse import os import re import sys from django.core import management from nautobot import __version__ from . import importer from .settings import create_default_settings __configured = False def sanitize_name(project): project = project.replace(" ", "-") return re.sub("[^A-Z0-9a-z_-]", "-", project) def parse_command_args(args): """ This parses the arguments and returns a tuple containing: (args, command, command_args) For example, "--config=bar start --with=baz" would return: (['--config=bar'], 'start', ['--with=baz']) """ index = None for arg_i, arg in enumerate(args): if not arg.startswith("-"): index = arg_i break # Unable to parse any arguments if index is None: return (args, None, []) return (args[:index], args[index], args[(index + 1) :]) def is_configured(): global __configured return __configured def configure_app( config_path=None, project=None, default_config_path=None, default_settings=None, settings_initializer=None, settings_envvar=None, initializer=None, allow_extras=True, config_module_name=None, runner_name=None, on_configure=None, ): """ :param project: should represent the canonical name for the project, generally the same name it assigned in distutils. :param default_config_path: the default location for the configuration file. :param default_settings: default settings to load (think inheritence). :param settings_initializer: a callback function which should return a string representing the default settings template to generate. :param initializer: a callback function which will be executed before the command is executed. It is passed a dictionary of various configuration attributes. """ global __configured project_filename = sanitize_name(project) if default_config_path is None: default_config_path = "~/%s/%s.conf.py" % (project_filename, project_filename) if settings_envvar is None: settings_envvar = project_filename.upper() + "_CONF" if config_module_name is None: config_module_name = project_filename + "_config" # normalize path if settings_envvar in os.environ: default_config_path = os.environ.get(settings_envvar) else: default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path))) if not config_path: config_path = default_config_path config_path = os.path.expanduser(config_path) if not os.path.exists(config_path): if runner_name: raise ValueError( "Configuration file does not exist. Use '%s init' to initialize the file." 
% (runner_name,) ) raise ValueError("Configuration file does not exist at %r" % (config_path,)) os.environ["DJANGO_SETTINGS_MODULE"] = config_module_name def settings_callback(settings): if initializer is None: return try: initializer( { "project": project, "config_path": config_path, "settings": settings, } ) except Exception: # XXX: Django doesn't like various errors in this path import sys import traceback traceback.print_exc() sys.exit(1) importer.install( config_module_name, config_path, default_settings, allow_extras=allow_extras, callback=settings_callback, ) __configured = True # HACK(dcramer): we need to force access of django.conf.settings to # ensure we don't hit any import-driven recursive behavior from django.conf import settings hasattr(settings, "INSTALLED_APPS") if on_configure: on_configure( { "project": project, "config_path": config_path, "settings": settings, } ) class VerboseHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): """Argparse Formatter that includes newlines and shows argument defaults.""" def run_app(**kwargs): sys_args = sys.argv # The established command for running this program runner_name = os.path.basename(sys_args[0]) default_config_path = kwargs.get("default_config_path") # Primary parser parser = management.CommandParser( description=kwargs.pop("description"), formatter_class=VerboseHelpFormatter, add_help=False, ) parser.add_argument( "-c", "--config", metavar="CONFIG", help="Path to the configuration file", ) parser.add_argument( "--version", action="version", version=__version__, ) # This block of code here is done in this way because of the built in Django # management command parsing not playing well unless you have a Django # config with SECRET_KEY defined. # Parse out `--config` here first capturing any unparsed args for passing to # Django parser. args, unparsed_args = parser.parse_known_args() # Now add the sub-parser for "init" command subparsers = parser.add_subparsers(help=False, dest="command", metavar="") init_parser = subparsers.add_parser( "init", help="Initialize a new configuration", ) init_parser.add_argument( "config_path", default=default_config_path, nargs="?", help="Path to output generated configuration file", ) # Try to use our parser first, to process custom arguments try: args = parser.parse_args() command = args.command command_args = sys.argv[1:] # Fallback to passing through to Django management commands # except RuntimeError as err: except management.CommandError as err: if "invalid choice" not in str(err): raise # Rewrite sys_args to have the unparsed args (if any) sys_args = sys_args[:1] + unparsed_args _, command, command_args = parse_command_args(sys_args[1:]) # If we don't get a command of some sort, print help and exit dirty if not command: parser.print_help() parser.exit(1) # The `init` command is reserved for initializing configuration if command == "init": settings_initializer = kwargs.get("settings_initializer") config_path = os.path.expanduser(args.config_path) # Check if the config already exists; alert user and exit if exists. if os.path.exists(config_path): print( f"A configuration already exists at {config_path}. Please backup and remove it or choose another path." 
) return # Create the config try: create_default_settings(config_path, settings_initializer) except OSError as e: raise e.__class__("Unable to write default settings file to %r" % config_path) print("Configuration file created at %r" % config_path) return # Fetch config path from `--config` if provided, otherwise we want it to # default to None so that the underlying machinery in `configure_app` will # process default path or environment variable. config_path = args.config # Overlay our config w/ defautls try: configure_app(config_path=config_path, **kwargs) except ValueError as err: parser.exit(status=2, message=str(err) + "\n") # Call Django management command management.execute_from_command_line([runner_name, command] + command_args) # Exit cleanly sys.exit(0) if __name__ == "__main__": run_app()
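For context, a minimal sketch of how run_app is typically wired up as a console-script entry point, based only on the keyword arguments consumed above (run_app pops "description"; the remaining keywords flow through to configure_app). All project-specific values here ("myapp", the paths, the dotted module strings) are hypothetical placeholders, and the logan.runner import path is an assumption.

# Hypothetical entry point wiring for run_app (all values are placeholders).
from logan.runner import run_app


def main():
    run_app(
        project="myapp",
        default_config_path="~/.myapp/myapp.conf.py",
        default_settings="myapp.conf.global_settings",
        settings_initializer="myapp.conf.generate_settings",
        settings_envvar="MYAPP_CONF",
        description="Manage myapp from the command line.",
    )


if __name__ == "__main__":
    main()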