Columns:
_id: string, length 2 to 7
title: string, length 1 to 88
partition: string, 3 distinct values
text: string, length 75 to 19.8k
language: string, 1 distinct value
meta_information: dict
q265900
chain
test
def chain(*tasks): """ Given a number of tasks, builds a dependency chain. chain(task_1, task_2, task_3, task_4) is equivalent to task_1.set_downstream(task_2) task_2.set_downstream(task_3) task_3.set_downstream(task_4) """ for up_task, down_task in zip(tasks[:-1], tasks[1:]): up_task.set_downstream(down_task)
python
{ "resource": "" }
q265901
pprinttable
test
def pprinttable(rows): """Returns a pretty ASCII table from tuples. If namedtuples are used, the table will have headers. """ if not rows: return if hasattr(rows[0], '_fields'): # if namedtuple headers = rows[0]._fields else: headers = ["col{}".format(i) for i in range(len(rows[0]))] lens = [len(s) for s in headers] for row in rows: for i in range(len(rows[0])): slen = len("{}".format(row[i])) if slen > lens[i]: lens[i] = slen formats = [] hformats = [] for i in range(len(rows[0])): if isinstance(rows[0][i], int): formats.append("%%%dd" % lens[i]) else: formats.append("%%-%ds" % lens[i]) hformats.append("%%-%ds" % lens[i]) pattern = " | ".join(formats) hpattern = " | ".join(hformats) separator = "-+-".join(['-' * n for n in lens]) s = "" s += separator + '\n' s += (hpattern % tuple(headers)) + '\n' s += separator + '\n' def f(t): return "{}".format(t) if isinstance(t, basestring) else t for line in rows: s += pattern % tuple(f(t) for t in line) + '\n' s += separator + '\n' return s
python
{ "resource": "" }
q265902
render_log_filename
test
def render_log_filename(ti, try_number, filename_template): """ Given task instance, try_number, filename_template, return the rendered log filename :param ti: task instance :param try_number: try_number of the task :param filename_template: filename template, which can be jinja template or python string template """ filename_template, filename_jinja_template = parse_template_string(filename_template) if filename_jinja_template: jinja_context = ti.get_template_context() jinja_context['try_number'] = try_number return filename_jinja_template.render(**jinja_context) return filename_template.format(dag_id=ti.dag_id, task_id=ti.task_id, execution_date=ti.execution_date.isoformat(), try_number=try_number)
python
{ "resource": "" }
q265903
DataProcHook.get_conn
test
def get_conn(self): """Returns a Google Cloud Dataproc service object.""" http_authorized = self._authorize() return build( 'dataproc', self.api_version, http=http_authorized, cache_discovery=False)
python
{ "resource": "" }
q265904
DataProcHook.wait
test
def wait(self, operation): """Waits for a Google Cloud Dataproc Operation to complete.""" submitted = _DataProcOperation(self.get_conn(), operation, self.num_retries) submitted.wait_for_done()
python
{ "resource": "" }
q265905
_deep_string_coerce
test
def _deep_string_coerce(content, json_path='json'): """ Coerces content or all values of content if it is a dict to a string. The function will throw if content contains non-string or non-numeric types. The reason why we have this function is because the ``self.json`` field must be a dict with only string values. This is because ``render_template`` will fail for numerical values. """ c = _deep_string_coerce if isinstance(content, six.string_types): return content elif isinstance(content, six.integer_types + (float,)): # Databricks can tolerate either numeric or string types in the API backend. return str(content) elif isinstance(content, (list, tuple)): return [c(e, '{0}[{1}]'.format(json_path, i)) for i, e in enumerate(content)] elif isinstance(content, dict): return {k: c(v, '{0}[{1}]'.format(json_path, k)) for k, v in list(content.items())} else: param_type = type(content) msg = 'Type {0} used for parameter {1} is not a number or a string' \ .format(param_type, json_path) raise AirflowException(msg)
python
{ "resource": "" }
q265906
_handle_databricks_operator_execution
test
def _handle_databricks_operator_execution(operator, hook, log, context): """ Handles the Airflow + Databricks lifecycle logic for a Databricks operator :param operator: Databricks operator being handled :param context: Airflow context """ if operator.do_xcom_push: context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id) log.info('Run submitted with run_id: %s', operator.run_id) run_page_url = hook.get_run_page_url(operator.run_id) if operator.do_xcom_push: context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url) log.info('View run status, Spark UI, and logs at %s', run_page_url) while True: run_state = hook.get_run_state(operator.run_id) if run_state.is_terminal: if run_state.is_successful: log.info('%s completed successfully.', operator.task_id) log.info('View run status, Spark UI, and logs at %s', run_page_url) return else: error_message = '{t} failed with terminal state: {s}'.format( t=operator.task_id, s=run_state) raise AirflowException(error_message) else: log.info('%s in run state: %s', operator.task_id, run_state) log.info('View run status, Spark UI, and logs at %s', run_page_url) log.info('Sleeping for %s seconds.', operator.polling_period_seconds) time.sleep(operator.polling_period_seconds)
python
{ "resource": "" }
q265907
PigCliHook.run_cli
test
def run_cli(self, pig, verbose=True): """ Run a pig script using the pig cli >>> ph = PigCliHook() >>> result = ph.run_cli("ls /;") >>> ("hdfs://" in result) True """ with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir: with NamedTemporaryFile(dir=tmp_dir) as f: f.write(pig.encode('utf-8')) f.flush() fname = f.name pig_bin = 'pig' cmd_extra = [] pig_cmd = [pig_bin, '-f', fname] + cmd_extra if self.pig_properties: pig_properties_list = self.pig_properties.split() pig_cmd.extend(pig_properties_list) if verbose: self.log.info("%s", " ".join(pig_cmd)) sp = subprocess.Popen( pig_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmp_dir, close_fds=True) self.sp = sp stdout = '' for line in iter(sp.stdout.readline, b''): stdout += line.decode('utf-8') if verbose: self.log.info(line.strip()) sp.wait() if sp.returncode: raise AirflowException(stdout) return stdout
python
{ "resource": "" }
q265908
fetch_celery_task_state
test
def fetch_celery_task_state(celery_task): """ Fetch and return the state of the given celery task. The scope of this function is global so that it can be called by subprocesses in the pool. :param celery_task: a tuple of the Celery task key and the async Celery object used to fetch the task's state :type celery_task: tuple(str, celery.result.AsyncResult) :return: a tuple of the Celery task key and the Celery state of the task :rtype: tuple[str, str] """ try: with timeout(seconds=2): # Accessing state property of celery task will make actual network request # to get the current state of the task. res = (celery_task[0], celery_task[1].state) except Exception as e: exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0], traceback.format_exc()) res = ExceptionWithTraceback(e, exception_traceback) return res
python
{ "resource": "" }
q265909
CeleryExecutor._num_tasks_per_send_process
test
def _num_tasks_per_send_process(self, to_send_count): """ How many Celery tasks should each worker process send. :return: Number of tasks that should be sent per process :rtype: int """ return max(1, int(math.ceil(1.0 * to_send_count / self._sync_parallelism)))
python
{ "resource": "" }
q265910
CeleryExecutor._num_tasks_per_fetch_process
test
def _num_tasks_per_fetch_process(self): """ How many Celery tasks should be fetched by each worker process. :return: Number of tasks that should be used per process :rtype: int """ return max(1, int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism)))
python
{ "resource": "" }
q265911
Variable.setdefault
test
def setdefault(cls, key, default, deserialize_json=False): """ Like a Python builtin dict object, setdefault returns the current value for a key, and if it isn't there, stores the default value and returns it. :param key: Dict key for this Variable :type key: str :param default: Default value to set and return if the variable isn't already in the DB :type default: Mixed :param deserialize_json: Store this as a JSON encoded value in the DB and un-encode it when retrieving a value :return: Mixed """ obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json) if obj is None: if default is not None: Variable.set(key, default, serialize_json=deserialize_json) return default else: raise ValueError('Default Value must be set') else: return obj
python
{ "resource": "" }
q265912
MLEngineHook.get_conn
test
def get_conn(self): """ Returns a Google MLEngine service object. """ authed_http = self._authorize() return build('ml', 'v1', http=authed_http, cache_discovery=False)
python
{ "resource": "" }
q265913
MLEngineHook.create_job
test
def create_job(self, project_id, job, use_existing_job_fn=None): """ Launches a MLEngine job and wait for it to reach a terminal state. :param project_id: The Google Cloud project id within which MLEngine job will be launched. :type project_id: str :param job: MLEngine Job object that should be provided to the MLEngine API, such as: :: { 'jobId': 'my_job_id', 'trainingInput': { 'scaleTier': 'STANDARD_1', ... } } :type job: dict :param use_existing_job_fn: In case that a MLEngine job with the same job_id already exist, this method (if provided) will decide whether we should use this existing job, continue waiting for it to finish and returning the job object. It should accepts a MLEngine job object, and returns a boolean value indicating whether it is OK to reuse the existing job. If 'use_existing_job_fn' is not provided, we by default reuse the existing MLEngine job. :type use_existing_job_fn: function :return: The MLEngine job object if the job successfully reach a terminal state (which might be FAILED or CANCELLED state). :rtype: dict """ request = self._mlengine.projects().jobs().create( parent='projects/{}'.format(project_id), body=job) job_id = job['jobId'] try: request.execute() except HttpError as e: # 409 means there is an existing job with the same job ID. if e.resp.status == 409: if use_existing_job_fn is not None: existing_job = self._get_job(project_id, job_id) if not use_existing_job_fn(existing_job): self.log.error( 'Job with job_id %s already exist, but it does ' 'not match our expectation: %s', job_id, existing_job ) raise self.log.info( 'Job with job_id %s already exist. Will waiting for it to finish', job_id ) else: self.log.error('Failed to create MLEngine job: {}'.format(e)) raise return self._wait_for_job_done(project_id, job_id)
python
{ "resource": "" }
q265914
MLEngineHook._get_job
test
def _get_job(self, project_id, job_id): """ Gets a MLEngine job based on the job name. :return: MLEngine job object if succeed. :rtype: dict Raises: googleapiclient.errors.HttpError: if HTTP error is returned from server """ job_name = 'projects/{}/jobs/{}'.format(project_id, job_id) request = self._mlengine.projects().jobs().get(name=job_name) while True: try: return request.execute() except HttpError as e: if e.resp.status == 429: # polling after 30 seconds when quota failure occurs time.sleep(30) else: self.log.error('Failed to get MLEngine job: {}'.format(e)) raise
python
{ "resource": "" }
q265915
MLEngineHook._wait_for_job_done
test
def _wait_for_job_done(self, project_id, job_id, interval=30): """ Waits for the Job to reach a terminal state. This method will periodically check the job state until the job reaches a terminal state. Raises: googleapiclient.errors.HttpError: if an HTTP error is returned when getting the job """ if interval <= 0: raise ValueError("Interval must be > 0") while True: job = self._get_job(project_id, job_id) if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']: return job time.sleep(interval)
python
{ "resource": "" }
q265916
MLEngineHook.create_version
test
def create_version(self, project_id, model_name, version_spec): """ Creates the Version on Google Cloud ML Engine. Returns the operation if the version was created successfully and raises an error otherwise. """ parent_name = 'projects/{}/models/{}'.format(project_id, model_name) create_request = self._mlengine.projects().models().versions().create( parent=parent_name, body=version_spec) response = create_request.execute() get_request = self._mlengine.projects().operations().get( name=response['name']) return _poll_with_exponential_delay( request=get_request, max_n=9, is_done_func=lambda resp: resp.get('done', False), is_error_func=lambda resp: resp.get('error', None) is not None)
python
{ "resource": "" }
q265917
MLEngineHook.set_default_version
test
def set_default_version(self, project_id, model_name, version_name): """ Sets a version to be the default. Blocks until finished. """ full_version_name = 'projects/{}/models/{}/versions/{}'.format( project_id, model_name, version_name) request = self._mlengine.projects().models().versions().setDefault( name=full_version_name, body={}) try: response = request.execute() self.log.info('Successfully set version: %s to default', response) return response except HttpError as e: self.log.error('Something went wrong: %s', e) raise
python
{ "resource": "" }
q265918
MLEngineHook.list_versions
test
def list_versions(self, project_id, model_name): """ Lists all available versions of a model. Blocks until finished. """ result = [] full_parent_name = 'projects/{}/models/{}'.format( project_id, model_name) request = self._mlengine.projects().models().versions().list( parent=full_parent_name, pageSize=100) response = request.execute() next_page_token = response.get('nextPageToken', None) result.extend(response.get('versions', [])) while next_page_token is not None: next_request = self._mlengine.projects().models().versions().list( parent=full_parent_name, pageToken=next_page_token, pageSize=100) response = next_request.execute() next_page_token = response.get('nextPageToken', None) result.extend(response.get('versions', [])) time.sleep(5) return result
python
{ "resource": "" }
q265919
MLEngineHook.delete_version
test
def delete_version(self, project_id, model_name, version_name): """ Deletes the given version of a model. Blocks until finished. """ full_name = 'projects/{}/models/{}/versions/{}'.format( project_id, model_name, version_name) delete_request = self._mlengine.projects().models().versions().delete( name=full_name) response = delete_request.execute() get_request = self._mlengine.projects().operations().get( name=response['name']) return _poll_with_exponential_delay( request=get_request, max_n=9, is_done_func=lambda resp: resp.get('done', False), is_error_func=lambda resp: resp.get('error', None) is not None)
python
{ "resource": "" }
q265920
MLEngineHook.create_model
test
def create_model(self, project_id, model): """ Create a Model. Blocks until finished. """ if not model['name']: raise ValueError("Model name must be provided and " "cannot be an empty string") project = 'projects/{}'.format(project_id) request = self._mlengine.projects().models().create( parent=project, body=model) return request.execute()
python
{ "resource": "" }
q265921
MLEngineHook.get_model
test
def get_model(self, project_id, model_name): """ Gets a Model. Blocks until finished. """ if not model_name: raise ValueError("Model name must be provided and " "it cannot be an empty string") full_model_name = 'projects/{}/models/{}'.format( project_id, model_name) request = self._mlengine.projects().models().get(name=full_model_name) try: return request.execute() except HttpError as e: if e.resp.status == 404: self.log.error('Model was not found: %s', e) return None raise
python
{ "resource": "" }
q265922
AwsDynamoDBHook.write_batch_data
test
def write_batch_data(self, items): """ Write batch items to dynamodb table with provisioned throughput capacity. """ dynamodb_conn = self.get_conn() try: table = dynamodb_conn.Table(self.table_name) with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch: for item in items: batch.put_item(Item=item) return True except Exception as general_error: raise AirflowException( 'Failed to insert items in dynamodb, error: {error}'.format( error=str(general_error) ) )
python
{ "resource": "" }
q265923
_integrate_plugins
test
def _integrate_plugins(): """Integrate plugins to the context.""" from airflow.plugins_manager import executors_modules for executors_module in executors_modules: sys.modules[executors_module.__name__] = executors_module globals()[executors_module._name] = executors_module
python
{ "resource": "" }
q265924
get_default_executor
test
def get_default_executor(): """Creates a new instance of the configured executor if none exists and returns it""" global DEFAULT_EXECUTOR if DEFAULT_EXECUTOR is not None: return DEFAULT_EXECUTOR executor_name = configuration.conf.get('core', 'EXECUTOR') DEFAULT_EXECUTOR = _get_executor(executor_name) log = LoggingMixin().log log.info("Using executor %s", executor_name) return DEFAULT_EXECUTOR
python
{ "resource": "" }
q265925
_get_executor
test
def _get_executor(executor_name): """ Creates a new instance of the named executor. In case the executor name is not known to Airflow, look for it in the plugins """ if executor_name == Executors.LocalExecutor: return LocalExecutor() elif executor_name == Executors.SequentialExecutor: return SequentialExecutor() elif executor_name == Executors.CeleryExecutor: from airflow.executors.celery_executor import CeleryExecutor return CeleryExecutor() elif executor_name == Executors.DaskExecutor: from airflow.executors.dask_executor import DaskExecutor return DaskExecutor() elif executor_name == Executors.KubernetesExecutor: from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor return KubernetesExecutor() else: # Loading plugins _integrate_plugins() executor_path = executor_name.split('.') if len(executor_path) != 2: raise AirflowException( "Executor {0} not supported: " "please specify in format plugin_module.executor".format(executor_name)) if executor_path[0] in globals(): return globals()[executor_path[0]].__dict__[executor_path[1]]() else: raise AirflowException("Executor {0} not supported.".format(executor_name))
python
{ "resource": "" }
q265926
SegmentHook.on_error
test
def on_error(self, error, items): """ Handles error callbacks when using Segment with segment_debug_mode set to True """ self.log.error('Encountered Segment error: {segment_error} with ' 'items: {with_items}'.format(segment_error=error, with_items=items)) raise AirflowException('Segment error: {}'.format(error))
python
{ "resource": "" }
q265927
MsSqlHook.get_conn
test
def get_conn(self): """ Returns a mssql connection object """ conn = self.get_connection(self.mssql_conn_id) conn = pymssql.connect( server=conn.host, user=conn.login, password=conn.password, database=self.schema or conn.schema, port=conn.port) return conn
python
{ "resource": "" }
q265928
trigger_dag
test
def trigger_dag(dag_id): """ Trigger a new dag run for a Dag with an execution date of now unless specified in the data. """ data = request.get_json(force=True) run_id = None if 'run_id' in data: run_id = data['run_id'] conf = None if 'conf' in data: conf = data['conf'] execution_date = None if 'execution_date' in data and data['execution_date'] is not None: execution_date = data['execution_date'] # Convert string datetime into actual datetime try: execution_date = timezone.parse(execution_date) except ValueError: error_message = ( 'Given execution date, {}, could not be identified ' 'as a date. Example date format: 2015-11-16T14:34:15+00:00' .format(execution_date)) _log.info(error_message) response = jsonify({'error': error_message}) response.status_code = 400 return response try: dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date) except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response if getattr(g, 'user', None): _log.info("User %s created %s", g.user, dr) response = jsonify(message="Created {}".format(dr)) return response
python
{ "resource": "" }
q265929
delete_dag
test
def delete_dag(dag_id): """ Delete all DB records related to the specified Dag. """ try: count = delete.delete_dag(dag_id) except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response return jsonify(message="Removed {} record(s)".format(count), count=count)
python
{ "resource": "" }
q265930
task_info
test
def task_info(dag_id, task_id): """Returns a JSON with a task's public instance variables. """ try: info = get_task(dag_id, task_id) except AirflowException as err: _log.info(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response # JSONify and return. fields = {k: str(v) for k, v in vars(info).items() if not k.startswith('_')} return jsonify(fields)
python
{ "resource": "" }
q265931
get_pools
test
def get_pools(): """Get all pools.""" try: pools = pool_api.get_pools() except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response else: return jsonify([p.to_json() for p in pools])
python
{ "resource": "" }
q265932
create_pool
test
def create_pool(): """Create a pool.""" params = request.get_json(force=True) try: pool = pool_api.create_pool(**params) except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response else: return jsonify(pool.to_json())
python
{ "resource": "" }
q265933
delete_pool
test
def delete_pool(name): """Delete pool.""" try: pool = pool_api.delete_pool(name=name) except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response else: return jsonify(pool.to_json())
python
{ "resource": "" }
q265934
AzureContainerInstanceHook.create_or_update
test
def create_or_update(self, resource_group, name, container_group): """ Create a new container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :param container_group: the properties of the container group :type container_group: azure.mgmt.containerinstance.models.ContainerGroup """ self.connection.container_groups.create_or_update(resource_group, name, container_group)
python
{ "resource": "" }
q265935
AzureContainerInstanceHook.get_state_exitcode_details
test
def get_state_exitcode_details(self, resource_group, name): """ Get the state and exitcode of a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :return: A tuple with the state, exitcode, and details. If the exitcode is unknown 0 is returned. :rtype: tuple(state,exitcode,details) """ current_state = self._get_instance_view(resource_group, name).current_state return (current_state.state, current_state.exit_code, current_state.detail_status)
python
{ "resource": "" }
q265936
AzureContainerInstanceHook.get_messages
test
def get_messages(self, resource_group, name): """ Get the messages of a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :return: A list of the event messages :rtype: list[str] """ instance_view = self._get_instance_view(resource_group, name) return [event.message for event in instance_view.events]
python
{ "resource": "" }
q265937
AzureContainerInstanceHook.get_logs
test
def get_logs(self, resource_group, name, tail=1000): """ Get the tail from logs of a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :param tail: the size of the tail :type tail: int :return: A list of log messages :rtype: list[str] """ logs = self.connection.container.list_logs(resource_group, name, name, tail=tail) return logs.content.splitlines(True)
python
{ "resource": "" }
q265938
AzureContainerInstanceHook.delete
test
def delete(self, resource_group, name): """ Delete a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str """ self.connection.container_groups.delete(resource_group, name)
python
{ "resource": "" }
q265939
AzureContainerInstanceHook.exists
test
def exists(self, resource_group, name): """ Test if a container group exists :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str """ for container in self.connection.container_groups.list_by_resource_group(resource_group): if container.name == name: return True return False
python
{ "resource": "" }
q265940
apply_defaults
test
def apply_defaults(func): """ Function decorator that Looks for an argument named "default_args", and fills the unspecified arguments from it. Since python2.* isn't clear about which arguments are missing when calling a function, and that this can be quite confusing with multi-level inheritance and argument defaults, this decorator also alerts with specific information about the missing arguments. """ # Cache inspect.signature for the wrapper closure to avoid calling it # at every decorated invocation. This is separate sig_cache created # per decoration, i.e. each function decorated using apply_defaults will # have a different sig_cache. sig_cache = signature(func) non_optional_args = { name for (name, param) in sig_cache.parameters.items() if param.default == param.empty and param.name != 'self' and param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)} @wraps(func) def wrapper(*args, **kwargs): if len(args) > 1: raise AirflowException( "Use keyword arguments when initializing operators") dag_args = {} dag_params = {} dag = kwargs.get('dag', None) or settings.CONTEXT_MANAGER_DAG if dag: dag_args = copy(dag.default_args) or {} dag_params = copy(dag.params) or {} params = {} if 'params' in kwargs: params = kwargs['params'] dag_params.update(params) default_args = {} if 'default_args' in kwargs: default_args = kwargs['default_args'] if 'params' in default_args: dag_params.update(default_args['params']) del default_args['params'] dag_args.update(default_args) default_args = dag_args for arg in sig_cache.parameters: if arg not in kwargs and arg in default_args: kwargs[arg] = default_args[arg] missing_args = list(non_optional_args - set(kwargs)) if missing_args: msg = "Argument {0} is required".format(missing_args) raise AirflowException(msg) kwargs['params'] = dag_params result = func(*args, **kwargs) return result return wrapper
python
{ "resource": "" }
q265941
HiveToDruidTransfer.construct_ingest_query
test
def construct_ingest_query(self, static_path, columns): """ Builds an ingest query for an HDFS TSV load. :param static_path: The path on hdfs where the data is :type static_path: str :param columns: List of all the columns that are available :type columns: list """ # backward compatibility for num_shards, # but target_partition_size is the default setting # and overwrites the num_shards num_shards = self.num_shards target_partition_size = self.target_partition_size if self.target_partition_size == -1: if self.num_shards == -1: target_partition_size = DEFAULT_TARGET_PARTITION_SIZE else: num_shards = -1 metric_names = [m['fieldName'] for m in self.metric_spec if m['type'] != 'count'] # Take all the columns, which are not the time dimension # or a metric, as the dimension columns dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim] ingest_query_dict = { "type": "index_hadoop", "spec": { "dataSchema": { "metricsSpec": self.metric_spec, "granularitySpec": { "queryGranularity": self.query_granularity, "intervals": self.intervals, "type": "uniform", "segmentGranularity": self.segment_granularity, }, "parser": { "type": "string", "parseSpec": { "columns": columns, "dimensionsSpec": { "dimensionExclusions": [], "dimensions": dimensions, # list of names "spatialDimensions": [] }, "timestampSpec": { "column": self.ts_dim, "format": "auto" }, "format": "tsv" } }, "dataSource": self.druid_datasource }, "tuningConfig": { "type": "hadoop", "jobProperties": { "mapreduce.job.user.classpath.first": "false", "mapreduce.map.output.compress": "false", "mapreduce.output.fileoutputformat.compress": "false", }, "partitionsSpec": { "type": "hashed", "targetPartitionSize": target_partition_size, "numShards": num_shards, }, }, "ioConfig": { "inputSpec": { "paths": static_path, "type": "static" }, "type": "hadoop" } } } if self.job_properties: ingest_query_dict['spec']['tuningConfig']['jobProperties'] \ .update(self.job_properties) if self.hadoop_dependency_coordinates: ingest_query_dict['hadoopDependencyCoordinates'] \ = self.hadoop_dependency_coordinates return ingest_query_dict
python
{ "resource": "" }
q265942
RedisPubSubSensor.poke
test
def poke(self, context): """ Check for message on subscribed channels and write to xcom the message with key ``message`` An example of message ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}`` :param context: the context object :type context: dict :return: ``True`` if message (with type 'message') is available or ``False`` if not """ self.log.info('RedisPubSubSensor checking for message on channels: %s', self.channels) message = self.pubsub.get_message() self.log.info('Message %s from channel %s', message, self.channels) # Process only message types if message and message['type'] == 'message': context['ti'].xcom_push(key='message', value=message) self.pubsub.unsubscribe(self.channels) return True return False
python
{ "resource": "" }
q265943
DagRun.find
test
def find(dag_id=None, run_id=None, execution_date=None, state=None, external_trigger=None, no_backfills=False, session=None): """ Returns a set of dag runs for the given search criteria. :param dag_id: the dag_id to find dag runs for :type dag_id: int, list :param run_id: defines the the run id for this dag run :type run_id: str :param execution_date: the execution date :type execution_date: datetime.datetime :param state: the state of the dag run :type state: airflow.utils.state.State :param external_trigger: whether this dag run is externally triggered :type external_trigger: bool :param no_backfills: return no backfills (True), return all (False). Defaults to False :type no_backfills: bool :param session: database session :type session: sqlalchemy.orm.session.Session """ DR = DagRun qry = session.query(DR) if dag_id: qry = qry.filter(DR.dag_id == dag_id) if run_id: qry = qry.filter(DR.run_id == run_id) if execution_date: if isinstance(execution_date, list): qry = qry.filter(DR.execution_date.in_(execution_date)) else: qry = qry.filter(DR.execution_date == execution_date) if state: qry = qry.filter(DR.state == state) if external_trigger is not None: qry = qry.filter(DR.external_trigger == external_trigger) if no_backfills: # in order to prevent a circular dependency from airflow.jobs import BackfillJob qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%')) dr = qry.order_by(DR.execution_date).all() return dr
python
{ "resource": "" }
q265944
DagRun.get_task_instances
test
def get_task_instances(self, state=None, session=None): """ Returns the task instances for this dag run """ from airflow.models.taskinstance import TaskInstance # Avoid circular import tis = session.query(TaskInstance).filter( TaskInstance.dag_id == self.dag_id, TaskInstance.execution_date == self.execution_date, ) if state: if isinstance(state, six.string_types): tis = tis.filter(TaskInstance.state == state) else: # this is required to deal with NULL values if None in state: tis = tis.filter( or_(TaskInstance.state.in_(state), TaskInstance.state.is_(None)) ) else: tis = tis.filter(TaskInstance.state.in_(state)) if self.dag and self.dag.partial: tis = tis.filter(TaskInstance.task_id.in_(self.dag.task_ids)) return tis.all()
python
{ "resource": "" }
q265945
DagRun.get_task_instance
test
def get_task_instance(self, task_id, session=None): """ Returns the task instance specified by task_id for this dag run :param task_id: the task id """ from airflow.models.taskinstance import TaskInstance # Avoid circular import TI = TaskInstance ti = session.query(TI).filter( TI.dag_id == self.dag_id, TI.execution_date == self.execution_date, TI.task_id == task_id ).first() return ti
python
{ "resource": "" }
q265946
DagRun.get_previous_dagrun
test
def get_previous_dagrun(self, session=None): """The previous DagRun, if there is one""" return session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date < self.execution_date ).order_by( DagRun.execution_date.desc() ).first()
python
{ "resource": "" }
q265947
DagRun.get_previous_scheduled_dagrun
test
def get_previous_scheduled_dagrun(self, session=None): """The previous, SCHEDULED DagRun, if there is one""" dag = self.get_dag() return session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date == dag.previous_schedule(self.execution_date) ).first()
python
{ "resource": "" }
q265948
DagRun.update_state
test
def update_state(self, session=None): """ Determines the overall state of the DagRun based on the state of its TaskInstances. :return: State """ dag = self.get_dag() tis = self.get_task_instances(session=session) self.log.debug("Updating state for %s considering %s task(s)", self, len(tis)) for ti in list(tis): # skip in db? if ti.state == State.REMOVED: tis.remove(ti) else: ti.task = dag.get_task(ti.task_id) # pre-calculate # db is faster start_dttm = timezone.utcnow() unfinished_tasks = self.get_task_instances( state=State.unfinished(), session=session ) none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks) none_task_concurrency = all(t.task.task_concurrency is None for t in unfinished_tasks) # small speed up if unfinished_tasks and none_depends_on_past and none_task_concurrency: # todo: this can actually get pretty slow: one task costs between 0.01-015s no_dependencies_met = True for ut in unfinished_tasks: # We need to flag upstream and check for changes because upstream # failures/re-schedules can result in deadlock false positives old_state = ut.state deps_met = ut.are_dependencies_met( dep_context=DepContext( flag_upstream_failed=True, ignore_in_retry_period=True, ignore_in_reschedule_period=True), session=session) if deps_met or old_state != ut.current_state(session=session): no_dependencies_met = False break duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000 Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration) root_ids = [t.task_id for t in dag.roots] roots = [t for t in tis if t.task_id in root_ids] # if all roots finished and at least one failed, the run failed if (not unfinished_tasks and any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)): self.log.info('Marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='task_failure', session=session) # if all roots succeeded and no unfinished tasks, the run succeeded elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED) for r in roots): self.log.info('Marking run %s successful', self) self.set_state(State.SUCCESS) dag.handle_callback(self, success=True, reason='success', session=session) # if *all tasks* are deadlocked, the run failed elif (unfinished_tasks and none_depends_on_past and none_task_concurrency and no_dependencies_met): self.log.info('Deadlock; marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='all_tasks_deadlocked', session=session) # finally, if the roots aren't done, the dag is still running else: self.set_state(State.RUNNING) self._emit_duration_stats_for_finished_state() # todo: determine we want to use with_for_update to make sure to lock the run session.merge(self) session.commit() return self.state
python
{ "resource": "" }
q265949
DagRun.verify_integrity
test
def verify_integrity(self, session=None): """ Verifies the DagRun by checking for removed tasks or tasks that are not in the database yet. It will set state to removed or add the task if required. """ from airflow.models.taskinstance import TaskInstance # Avoid circular import dag = self.get_dag() tis = self.get_task_instances(session=session) # check for removed or restored tasks task_ids = [] for ti in tis: task_ids.append(ti.task_id) task = None try: task = dag.get_task(ti.task_id) except AirflowException: if ti.state == State.REMOVED: pass # ti has already been removed, just ignore it elif self.state is not State.RUNNING and not dag.partial: self.log.warning("Failed to get task '{}' for dag '{}'. " "Marking it as removed.".format(ti, dag)) Stats.incr( "task_removed_from_dag.{}".format(dag.dag_id), 1, 1) ti.state = State.REMOVED is_task_in_dag = task is not None should_restore_task = is_task_in_dag and ti.state == State.REMOVED if should_restore_task: self.log.info("Restoring task '{}' which was previously " "removed from DAG '{}'".format(ti, dag)) Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1) ti.state = State.NONE # check for missing tasks for task in six.itervalues(dag.task_dict): if task.start_date > self.execution_date and not self.is_backfill: continue if task.task_id not in task_ids: Stats.incr( "task_instance_created-{}".format(task.__class__.__name__), 1, 1) ti = TaskInstance(task, self.execution_date) session.add(ti) session.commit()
python
{ "resource": "" }
q265950
jenkins_request_with_headers
test
def jenkins_request_with_headers(jenkins_server, req): """ We need to get the headers in addition to the body answer to get the location from them This function uses jenkins_request method from python-jenkins library with just the return call changed :param jenkins_server: The server to query :param req: The request to execute :return: Dict containing the response body (key body) and the headers coming along (headers) """ try: response = jenkins_server.jenkins_request(req) response_body = response.content response_headers = response.headers if response_body is None: raise jenkins.EmptyResponseException( "Error communicating with server[%s]: " "empty response" % jenkins_server.server) return {'body': response_body.decode('utf-8'), 'headers': response_headers} except HTTPError as e: # Jenkins's funky authentication means its nigh impossible to # distinguish errors. if e.code in [401, 403, 500]: # six.moves.urllib.error.HTTPError provides a 'reason' # attribute for all python version except for ver 2.6 # Falling back to HTTPError.msg since it contains the # same info as reason raise JenkinsException( 'Error in request. ' + 'Possibly authentication failed [%s]: %s' % ( e.code, e.msg) ) elif e.code == 404: raise jenkins.NotFoundException('Requested item could not be found') else: raise except socket.timeout as e: raise jenkins.TimeoutException('Error in request: %s' % e) except URLError as e: # python 2.6 compatibility to ensure same exception raised # since URLError wraps a socket timeout on python 2.6. if str(e.reason) == "timed out": raise jenkins.TimeoutException('Error in request: %s' % e.reason) raise JenkinsException('Error in request: %s' % e.reason)
python
{ "resource": "" }
q265951
context_to_airflow_vars
test
def context_to_airflow_vars(context, in_env_var_format=False): """ Given a context, this function provides a dictionary of values that can be used to externally reconstruct relations between dags, dag_runs, tasks and task_instances. Default to abc.def.ghi format and can be made to ABC_DEF_GHI format if in_env_var_format is set to True. :param context: The context for the task_instance of interest. :type context: dict :param in_env_var_format: If returned vars should be in ABC_DEF_GHI format. :type in_env_var_format: bool :return: task_instance context as dict. """ params = dict() if in_env_var_format: name_format = 'env_var_format' else: name_format = 'default' task_instance = context.get('task_instance') if task_instance and task_instance.dag_id: params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID'][ name_format]] = task_instance.dag_id if task_instance and task_instance.task_id: params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID'][ name_format]] = task_instance.task_id if task_instance and task_instance.execution_date: params[ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][ name_format]] = task_instance.execution_date.isoformat() dag_run = context.get('dag_run') if dag_run and dag_run.run_id: params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][ name_format]] = dag_run.run_id return params
python
{ "resource": "" }
q265952
conditionally_trigger
test
def conditionally_trigger(context, dag_run_obj): """This function decides whether or not to Trigger the remote DAG""" c_p = context['params']['condition_param'] print("Controller DAG : conditionally_trigger = {}".format(c_p)) if context['params']['condition_param']: dag_run_obj.payload = {'message': context['params']['message']} pp.pprint(dag_run_obj.payload) return dag_run_obj
python
{ "resource": "" }
q265953
DatadogHook.send_metric
test
def send_metric(self, metric_name, datapoint, tags=None, type_=None, interval=None): """ Sends a single datapoint metric to DataDog :param metric_name: The name of the metric :type metric_name: str :param datapoint: A single integer or float related to the metric :type datapoint: int or float :param tags: A list of tags associated with the metric :type tags: list :param type_: Type of your metric: gauge, rate, or count :type type_: str :param interval: If the type of the metric is rate or count, define the corresponding interval :type interval: int """ response = api.Metric.send( metric=metric_name, points=datapoint, host=self.host, tags=tags, type=type_, interval=interval) self.validate_response(response) return response
python
{ "resource": "" }
q265954
DatadogHook.query_metric
test
def query_metric(self, query, from_seconds_ago, to_seconds_ago): """ Queries datadog for a specific metric, potentially with some function applied to it and returns the results. :param query: The datadog query to execute (see datadog docs) :type query: str :param from_seconds_ago: How many seconds ago to start querying for. :type from_seconds_ago: int :param to_seconds_ago: Up to how many seconds ago to query for. :type to_seconds_ago: int """ now = int(time.time()) response = api.Metric.query( start=now - from_seconds_ago, end=now - to_seconds_ago, query=query) self.validate_response(response) return response
python
{ "resource": "" }
q265955
DagBag.get_dag
test
def get_dag(self, dag_id): """ Gets the DAG out of the dictionary, and refreshes it if expired """ from airflow.models.dag import DagModel # Avoid circular import # If asking for a known subdag, we want to refresh the parent root_dag_id = dag_id if dag_id in self.dags: dag = self.dags[dag_id] if dag.is_subdag: root_dag_id = dag.parent_dag.dag_id # If the dag corresponding to root_dag_id is absent or expired orm_dag = DagModel.get_current(root_dag_id) if orm_dag and ( root_dag_id not in self.dags or ( orm_dag.last_expired and dag.last_loaded < orm_dag.last_expired ) ): # Reprocess source file found_dags = self.process_file( filepath=orm_dag.fileloc, only_if_updated=False) # If the source file no longer exports `dag_id`, delete it from self.dags if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]: return self.dags[dag_id] elif dag_id in self.dags: del self.dags[dag_id] return self.dags.get(dag_id)
python
{ "resource": "" }
q265956
DagBag.kill_zombies
test
def kill_zombies(self, zombies, session=None): """ Fail given zombie tasks, which are tasks that haven't had a heartbeat for too long, in the current DagBag. :param zombies: zombie task instances to kill. :type zombies: airflow.utils.dag_processing.SimpleTaskInstance :param session: DB session. :type session: sqlalchemy.orm.session.Session """ from airflow.models.taskinstance import TaskInstance # Avoid circular import for zombie in zombies: if zombie.dag_id in self.dags: dag = self.dags[zombie.dag_id] if zombie.task_id in dag.task_ids: task = dag.get_task(zombie.task_id) ti = TaskInstance(task, zombie.execution_date) # Get properties needed for failure handling from SimpleTaskInstance. ti.start_date = zombie.start_date ti.end_date = zombie.end_date ti.try_number = zombie.try_number ti.state = zombie.state ti.test_mode = configuration.getboolean('core', 'unit_test_mode') ti.handle_failure("{} detected as zombie".format(ti), ti.test_mode, ti.get_template_context()) self.log.info( 'Marked zombie job %s as %s', ti, ti.state) Stats.incr('zombies_killed') session.commit()
python
{ "resource": "" }
q265957
DagBag.bag_dag
test
def bag_dag(self, dag, parent_dag, root_dag): """ Adds the DAG into the bag, recurses into sub dags. Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags """ dag.test_cycle() # throws if a task cycle is found dag.resolve_template_files() dag.last_loaded = timezone.utcnow() for task in dag.tasks: settings.policy(task) subdags = dag.subdags try: for subdag in subdags: subdag.full_filepath = dag.full_filepath subdag.parent_dag = dag subdag.is_subdag = True self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag) self.dags[dag.dag_id] = dag self.log.debug('Loaded DAG %s', dag) except AirflowDagCycleException as cycle_exception: # There was an error in bagging the dag. Remove it from the list of dags self.log.exception('Exception bagging dag: %s', dag.dag_id) # Only necessary at the root level since DAG.subdags automatically # performs DFS to search through all subdags if dag == root_dag: for subdag in subdags: if subdag.dag_id in self.dags: del self.dags[subdag.dag_id] raise cycle_exception
python
{ "resource": "" }
q265958
DagBag.collect_dags
test
def collect_dags( self, dag_folder=None, only_if_updated=True, include_examples=configuration.conf.getboolean('core', 'LOAD_EXAMPLES'), safe_mode=configuration.conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE')): """ Given a file path or a folder, this method looks for python modules, imports them and adds them to the dagbag collection. Note that if a ``.airflowignore`` file is found while processing the directory, it will behave much like a ``.gitignore``, ignoring files that match any of the regex patterns specified in the file. **Note**: The patterns in .airflowignore are treated as un-anchored regexes, not shell-like glob patterns. """ start_dttm = timezone.utcnow() dag_folder = dag_folder or self.dag_folder # Used to store stats around DagBag processing stats = [] FileLoadStat = namedtuple( 'FileLoadStat', "file duration dag_num task_num dags") dag_folder = correct_maybe_zipped(dag_folder) for filepath in list_py_file_paths(dag_folder, safe_mode=safe_mode, include_examples=include_examples): try: ts = timezone.utcnow() found_dags = self.process_file( filepath, only_if_updated=only_if_updated, safe_mode=safe_mode) td = timezone.utcnow() - ts td = td.total_seconds() + ( float(td.microseconds) / 1000000) stats.append(FileLoadStat( filepath.replace(dag_folder, ''), td, len(found_dags), sum([len(dag.tasks) for dag in found_dags]), str([dag.dag_id for dag in found_dags]), )) except Exception as e: self.log.exception(e) Stats.gauge( 'collect_dags', (timezone.utcnow() - start_dttm).total_seconds(), 1) Stats.gauge( 'dagbag_size', len(self.dags), 1) Stats.gauge( 'dagbag_import_errors', len(self.import_errors), 1) self.dagbag_stats = sorted( stats, key=lambda x: x.duration, reverse=True)
python
{ "resource": "" }
q265959
DagBag.dagbag_report
test
def dagbag_report(self): """Prints a report around DagBag loading stats""" report = textwrap.dedent("""\n ------------------------------------------------------------------- DagBag loading stats for {dag_folder} ------------------------------------------------------------------- Number of DAGs: {dag_num} Total task number: {task_num} DagBag parsing time: {duration} {table} """) stats = self.dagbag_stats return report.format( dag_folder=self.dag_folder, duration=sum([o.duration for o in stats]), dag_num=sum([o.dag_num for o in stats]), task_num=sum([o.task_num for o in stats]), table=pprinttable(stats), )
python
{ "resource": "" }
q265960
ds_add
test
def ds_add(ds, days): """ Add or subtract days from a YYYY-MM-DD :param ds: anchor date in ``YYYY-MM-DD`` format to add to :type ds: str :param days: number of days to add to the ds, you can use negative values :type days: int >>> ds_add('2015-01-01', 5) '2015-01-06' >>> ds_add('2015-01-06', -5) '2015-01-01' """ ds = datetime.strptime(ds, '%Y-%m-%d') if days: ds = ds + timedelta(days) return ds.isoformat()[:10]
python
{ "resource": "" }
q265961
ds_format
test
def ds_format(ds, input_format, output_format): """ Takes an input string and outputs another string as specified in the output format :param ds: input string which contains a date :type ds: str :param input_format: input string format. E.g. %Y-%m-%d :type input_format: str :param output_format: output string format E.g. %Y-%m-%d :type output_format: str >>> ds_format('2015-01-01', "%Y-%m-%d", "%m-%d-%y") '01-01-15' >>> ds_format('1/5/2015', "%m/%d/%Y", "%Y-%m-%d") '2015-01-05' """ return datetime.strptime(ds, input_format).strftime(output_format)
python
{ "resource": "" }
q265962
HdfsSensorRegex.poke
test
def poke(self, context): """ poke matching files in a directory with self.regex :return: Bool depending on the search criteria """ sb = self.hook(self.hdfs_conn_id).get_conn() self.log.info( 'Poking for %s to be a directory with files matching %s', self.filepath, self.regex.pattern ) result = [f for f in sb.ls([self.filepath], include_toplevel=False) if f['file_type'] == 'f' and self.regex.match(f['path'].replace('%s/' % self.filepath, ''))] result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying) result = self.filter_for_filesize(result, self.file_size) return bool(result)
python
{ "resource": "" }
q265963
HdfsSensorFolder.poke
test
def poke(self, context): """ poke for a non empty directory :return: Bool depending on the search criteria """ sb = self.hook(self.hdfs_conn_id).get_conn() result = [f for f in sb.ls([self.filepath], include_toplevel=True)] result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying) result = self.filter_for_filesize(result, self.file_size) if self.be_empty: self.log.info('Poking for filepath %s to a empty directory', self.filepath) return len(result) == 1 and result[0]['path'] == self.filepath else: self.log.info('Poking for filepath %s to a non empty directory', self.filepath) result.pop(0) return bool(result) and result[0]['file_type'] == 'f'
python
{ "resource": "" }
q265964
clear_task_instances
test
def clear_task_instances(tis, session, activate_dag_runs=True, dag=None, ): """ Clears a set of task instances, but makes sure the running ones get killed. :param tis: a list of task instances :param session: current session :param activate_dag_runs: flag to check for active dag run :param dag: DAG object """ job_ids = [] for ti in tis: if ti.state == State.RUNNING: if ti.job_id: ti.state = State.SHUTDOWN job_ids.append(ti.job_id) else: task_id = ti.task_id if dag and dag.has_task(task_id): task = dag.get_task(task_id) task_retries = task.retries ti.max_tries = ti.try_number + task_retries - 1 else: # Ignore errors when updating max_tries if dag is None or # task not found in dag since database records could be # outdated. We make max_tries the maximum value of its # original max_tries or the current task try number. ti.max_tries = max(ti.max_tries, ti.try_number - 1) ti.state = State.NONE session.merge(ti) if job_ids: from airflow.jobs import BaseJob as BJ for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all(): job.state = State.SHUTDOWN if activate_dag_runs and tis: from airflow.models.dagrun import DagRun # Avoid circular import drs = session.query(DagRun).filter( DagRun.dag_id.in_({ti.dag_id for ti in tis}), DagRun.execution_date.in_({ti.execution_date for ti in tis}), ).all() for dr in drs: dr.state = State.RUNNING dr.start_date = timezone.utcnow()
python
{ "resource": "" }
q265965
TaskInstance.try_number
test
def try_number(self): """ Return the try number that this task instance will have when it is actually run. If the TI is currently running, this will match the column in the database; in all other cases this will be incremented. """ # This is designed so that task logs end up in the right file. if self.state == State.RUNNING: return self._try_number return self._try_number + 1
python
{ "resource": "" }
q265966
TaskInstance.generate_command
test
def generate_command(dag_id, task_id, execution_date, mark_success=False, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, local=False, pickle_id=None, file_path=None, raw=False, job_id=None, pool=None, cfg_path=None ): """ Generates the shell command required to execute this task instance. :param dag_id: DAG ID :type dag_id: unicode :param task_id: Task ID :type task_id: unicode :param execution_date: Execution date for the task :type execution_date: datetime :param mark_success: Whether to mark the task as successful :type mark_success: bool :param ignore_all_deps: Ignore all ignorable dependencies. Overrides the other ignore_* parameters. :type ignore_all_deps: bool :param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for Backfills) :type ignore_depends_on_past: bool :param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and trigger rule :type ignore_task_deps: bool :param ignore_ti_state: Ignore the task instance's previous failure/success :type ignore_ti_state: bool :param local: Whether to run the task locally :type local: bool :param pickle_id: If the DAG was serialized to the DB, the ID associated with the pickled DAG :type pickle_id: unicode :param file_path: path to the file containing the DAG definition :param raw: raw mode (needs more details) :param job_id: job ID (needs more details) :param pool: the Airflow pool that the task should run in :type pool: unicode :param cfg_path: the Path to the configuration file :type cfg_path: basestring :return: shell command that can be used to run the task instance """ iso = execution_date.isoformat() cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)] cmd.extend(["--mark_success"]) if mark_success else None cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None cmd.extend(["--job_id", str(job_id)]) if job_id else None cmd.extend(["-A"]) if ignore_all_deps else None cmd.extend(["-i"]) if ignore_task_deps else None cmd.extend(["-I"]) if ignore_depends_on_past else None cmd.extend(["--force"]) if ignore_ti_state else None cmd.extend(["--local"]) if local else None cmd.extend(["--pool", pool]) if pool else None cmd.extend(["--raw"]) if raw else None cmd.extend(["-sd", file_path]) if file_path else None cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None return cmd
python
{ "resource": "" }
q265967
TaskInstance.current_state
test
def current_state(self, session=None): """ Get the very latest state from the database. If a session is passed, it is used and looking up the state becomes part of that session; otherwise a new session is used. """ TI = TaskInstance ti = session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == self.task_id, TI.execution_date == self.execution_date, ).all() if ti: state = ti[0].state else: state = None return state
python
{ "resource": "" }
q265968
TaskInstance.error
test
def error(self, session=None): """ Forces the task instance's state to FAILED in the database. """ self.log.error("Recording the task instance as FAILED") self.state = State.FAILED session.merge(self) session.commit()
python
{ "resource": "" }
q265969
TaskInstance.refresh_from_db
test
def refresh_from_db(self, session=None, lock_for_update=False): """ Refreshes the task instance from the database based on the primary key :param lock_for_update: if True, indicates that the database should lock the TaskInstance (issuing a FOR UPDATE clause) until the session is committed. """ TI = TaskInstance qry = session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == self.task_id, TI.execution_date == self.execution_date) if lock_for_update: ti = qry.with_for_update().first() else: ti = qry.first() if ti: self.state = ti.state self.start_date = ti.start_date self.end_date = ti.end_date # Get the raw value of try_number column, don't read through the # accessor here otherwise it will be incremented by one already. self.try_number = ti._try_number self.max_tries = ti.max_tries self.hostname = ti.hostname self.pid = ti.pid self.executor_config = ti.executor_config else: self.state = None
python
{ "resource": "" }
q265970
TaskInstance.clear_xcom_data
test
def clear_xcom_data(self, session=None):
    """
    Clears all XCom data from the database for the task instance
    """
    session.query(XCom).filter(
        XCom.dag_id == self.dag_id,
        XCom.task_id == self.task_id,
        XCom.execution_date == self.execution_date
    ).delete()
    session.commit()
python
{ "resource": "" }
q265971
TaskInstance.key
test
def key(self):
    """
    Returns a tuple that identifies the task instance uniquely
    """
    return self.dag_id, self.task_id, self.execution_date, self.try_number
python
{ "resource": "" }
q265972
TaskInstance.are_dependents_done
test
def are_dependents_done(self, session=None):
    """
    Checks whether the dependents of this task instance have all succeeded.
    This is meant to be used by wait_for_downstream.

    This is useful when you do not want to start processing the next
    schedule of a task until the dependents are done. For instance,
    if the task DROPs and recreates a table.
    """
    task = self.task

    if not task.downstream_task_ids:
        return True

    ti = session.query(func.count(TaskInstance.task_id)).filter(
        TaskInstance.dag_id == self.dag_id,
        TaskInstance.task_id.in_(task.downstream_task_ids),
        TaskInstance.execution_date == self.execution_date,
        TaskInstance.state == State.SUCCESS,
    )
    count = ti[0][0]
    return count == len(task.downstream_task_ids)
python
{ "resource": "" }
q265973
TaskInstance.next_retry_datetime
test
def next_retry_datetime(self):
    """
    Get datetime of the next retry if the task instance fails. For exponential
    backoff, retry_delay is used as base and will be converted to seconds.
    """
    delay = self.task.retry_delay
    if self.task.retry_exponential_backoff:
        min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2)))
        # deterministic per task instance
        hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id,
                                                     self.task_id,
                                                     self.execution_date,
                                                     self.try_number)
                                .encode('utf-8')).hexdigest(), 16)
        # between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number)
        modded_hash = min_backoff + hash % min_backoff
        # timedelta has a maximum representable value. The exponentiation
        # here means this value can be exceeded after a certain number
        # of tries (around 50 if the initial delay is 1s, even fewer if
        # the delay is larger). Cap the value here before creating a
        # timedelta object so the operation doesn't fail.
        delay_backoff_in_seconds = min(
            modded_hash,
            timedelta.max.total_seconds() - 1
        )
        delay = timedelta(seconds=delay_backoff_in_seconds)
        if self.task.max_retry_delay:
            delay = min(self.task.max_retry_delay, delay)
    return self.end_date + delay
python
{ "resource": "" }
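A quick arithmetic sketch of the backoff above, assuming a hypothetical task with retry_delay of 300 seconds and retry_exponential_backoff enabled (values follow directly from the code shown):

# Illustrative only: assumed retry_delay = 300s, retry_exponential_backoff = True
# try_number == 2 -> min_backoff = int(300 * 2 ** 0) = 300
#                    delay lands in [300, 600) seconds, deterministic per task instance
# try_number == 4 -> min_backoff = int(300 * 2 ** 2) = 1200
#                    delay lands in [1200, 2400) seconds, then capped by max_retry_delay if set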
q265974
TaskInstance.ready_for_retry
test
def ready_for_retry(self):
    """
    Checks whether the task instance is in the right state and timeframe
    to be retried.
    """
    return (self.state == State.UP_FOR_RETRY and
            self.next_retry_datetime() < timezone.utcnow())
python
{ "resource": "" }
q265975
TaskInstance.pool_full
test
def pool_full(self, session):
    """
    Returns a boolean as to whether the slot pool has room for this
    task to run
    """
    if not self.task.pool:
        return False

    pool = (
        session
        .query(Pool)
        .filter(Pool.pool == self.task.pool)
        .first()
    )
    if not pool:
        return False
    open_slots = pool.open_slots(session=session)

    return open_slots <= 0
python
{ "resource": "" }
q265976
TaskInstance.get_dagrun
test
def get_dagrun(self, session):
    """
    Returns the DagRun for this TaskInstance

    :param session: SQLAlchemy ORM Session
    :return: DagRun
    """
    from airflow.models.dagrun import DagRun  # Avoid circular import
    dr = session.query(DagRun).filter(
        DagRun.dag_id == self.dag_id,
        DagRun.execution_date == self.execution_date
    ).first()

    return dr
python
{ "resource": "" }
q265977
TaskInstance.xcom_push
test
def xcom_push(
        self,
        key,
        value,
        execution_date=None):
    """
    Make an XCom available for tasks to pull.

    :param key: A key for the XCom
    :type key: str
    :param value: A value for the XCom. The value is pickled and stored
        in the database.
    :type value: any pickleable object
    :param execution_date: if provided, the XCom will not be visible until
        this date. This can be used, for example, to send a message to a
        task on a future date without it being immediately visible.
    :type execution_date: datetime
    """

    if execution_date and execution_date < self.execution_date:
        raise ValueError(
            'execution_date can not be in the past (current '
            'execution_date is {}; received {})'.format(
                self.execution_date, execution_date))

    XCom.set(
        key=key,
        value=value,
        task_id=self.task_id,
        dag_id=self.dag_id,
        execution_date=execution_date or self.execution_date)
python
{ "resource": "" }
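A minimal usage sketch, assuming a PythonOperator callable run with provide_context=True so the TaskInstance is available as context['ti']; the key name and value are hypothetical.

def push_row_count(**context):
    # Push a value under an explicit key; it is pickled and stored in the XCom table
    context['ti'].xcom_push(key='row_count', value=42)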
q265978
TaskInstance.xcom_pull
test
def xcom_pull(
        self,
        task_ids=None,
        dag_id=None,
        key=XCOM_RETURN_KEY,
        include_prior_dates=False):
    """
    Pull XComs that optionally meet certain criteria.

    The default value for `key` limits the search to XComs
    that were returned by other tasks (as opposed to those that were pushed
    manually). To remove this filter, pass key=None (or any desired value).

    If a single task_id string is provided, the result is the value of the
    most recent matching XCom from that task_id. If multiple task_ids are
    provided, a tuple of matching values is returned. None is returned
    whenever no matches are found.

    :param key: A key for the XCom. If provided, only XComs with matching
        keys will be returned. The default key is 'return_value', also
        available as a constant XCOM_RETURN_KEY. This key is automatically
        given to XComs returned by tasks (as opposed to being pushed
        manually). To remove the filter, pass key=None.
    :type key: str
    :param task_ids: Only XComs from tasks with matching ids will be
        pulled. Can pass None to remove the filter.
    :type task_ids: str or iterable of strings (representing task_ids)
    :param dag_id: If provided, only pulls XComs from this DAG.
        If None (default), the DAG of the calling task is used.
    :type dag_id: str
    :param include_prior_dates: If False, only XComs from the current
        execution_date are returned. If True, XComs from previous dates
        are returned as well.
    :type include_prior_dates: bool
    """

    if dag_id is None:
        dag_id = self.dag_id

    pull_fn = functools.partial(
        XCom.get_one,
        execution_date=self.execution_date,
        key=key,
        dag_id=dag_id,
        include_prior_dates=include_prior_dates)

    if is_container(task_ids):
        return tuple(pull_fn(task_id=t) for t in task_ids)
    else:
        return pull_fn(task_id=task_ids)
python
{ "resource": "" }
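A companion sketch for pulling, under the same provide_context assumption; the task ids and key are hypothetical.

def pull_values(**context):
    ti = context['ti']
    # Single task_id -> single value; default key is XCOM_RETURN_KEY ('return_value')
    count = ti.xcom_pull(task_ids='push_task', key='row_count')
    # Iterable of task_ids -> tuple of values, in the same order
    a, b = ti.xcom_pull(task_ids=['task_a', 'task_b'])
    return count, a, b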
q265979
TaskInstance.init_run_context
test
def init_run_context(self, raw=False):
    """
    Sets the log context.
    """
    self.raw = raw
    self._set_context(self)
python
{ "resource": "" }
q265980
WasbTaskHandler.close
test
def close(self):
    """
    Close and upload local log file to remote storage Wasb.
    """
    # When the application exits, the system shuts down all handlers by
    # calling their close method. Here we check if the logger is already
    # closed to prevent uploading the log to remote storage multiple
    # times when `logging.shutdown` is called.
    if self.closed:
        return

    super().close()

    if not self.upload_on_close:
        return

    local_loc = os.path.join(self.local_base, self.log_relative_path)
    remote_loc = os.path.join(self.remote_base, self.log_relative_path)
    if os.path.exists(local_loc):
        # read log and remove old logs to get just the latest additions
        with open(local_loc, 'r') as logfile:
            log = logfile.read()
        self.wasb_write(log, remote_loc, append=True)

        if self.delete_local_copy:
            shutil.rmtree(os.path.dirname(local_loc))
    # Mark closed so we don't double write if close is called twice
    self.closed = True
python
{ "resource": "" }
q265981
GceHook.get_conn
test
def get_conn(self):
    """
    Retrieves connection to Google Compute Engine.

    :return: Google Compute Engine services object
    :rtype: googleapiclient.discovery.Resource
    """
    if not self._conn:
        http_authorized = self._authorize()
        self._conn = build('compute', self.api_version,
                           http=http_authorized, cache_discovery=False)
    return self._conn
python
{ "resource": "" }
q265982
GceHook.start_instance
test
def start_instance(self, zone, resource_id, project_id=None):
    """
    Starts an existing instance defined by project_id, zone and resource_id.
    Must be called with keyword arguments rather than positional.

    :param zone: Google Cloud Platform zone where the instance exists
    :type zone: str
    :param resource_id: Name of the Compute Engine instance resource
    :type resource_id: str
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: None
    """
    response = self.get_conn().instances().start(
        project=project_id,
        zone=zone,
        instance=resource_id
    ).execute(num_retries=self.num_retries)
    try:
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name,
                                         zone=zone)
python
{ "resource": "" }
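A hedged usage sketch (not from the record above), assuming the 1.10-era contrib import path; the connection id, project, zone and instance name are placeholders.

from airflow.contrib.hooks.gcp_compute_hook import GceHook

hook = GceHook(gcp_conn_id='google_cloud_default')
# Blocks until the underlying zonal operation reaches DONE
# (see _wait_for_operation_to_complete later in this section)
hook.start_instance(zone='europe-west1-b',
                    resource_id='example-instance',
                    project_id='example-project')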
q265983
GceHook.set_machine_type
test
def set_machine_type(self, zone, resource_id, body, project_id=None):
    """
    Sets machine type of an instance defined by project_id, zone and resource_id.
    Must be called with keyword arguments rather than positional.

    :param zone: Google Cloud Platform zone where the instance exists.
    :type zone: str
    :param resource_id: Name of the Compute Engine instance resource
    :type resource_id: str
    :param body: Body required by the Compute Engine setMachineType API,
        as described in
        https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType
    :type body: dict
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: None
    """
    response = self._execute_set_machine_type(zone, resource_id, body, project_id)
    try:
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name,
                                         zone=zone)
python
{ "resource": "" }
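Continuing the hypothetical hook sketch above, a minimal setMachineType body might look like the following; the zone and machine type are placeholders and the partial-URL form follows the public API documentation.

SET_MACHINE_TYPE_BODY = {
    # Full or partial URL of the target machine type, per the setMachineType API
    "machineType": "zones/europe-west1-b/machineTypes/n1-standard-2"
}

hook.set_machine_type(zone='europe-west1-b',
                      resource_id='example-instance',
                      body=SET_MACHINE_TYPE_BODY,
                      project_id='example-project')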
q265984
GceHook.get_instance_template
test
def get_instance_template(self, resource_id, project_id=None):
    """
    Retrieves instance template by project_id and resource_id.
    Must be called with keyword arguments rather than positional.

    :param resource_id: Name of the instance template
    :type resource_id: str
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: Instance template representation as object according to
        https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
    :rtype: dict
    """
    response = self.get_conn().instanceTemplates().get(
        project=project_id,
        instanceTemplate=resource_id
    ).execute(num_retries=self.num_retries)
    return response
python
{ "resource": "" }
q265985
GceHook.insert_instance_template
test
def insert_instance_template(self, body, request_id=None, project_id=None):
    """
    Inserts instance template using the body specified.
    Must be called with keyword arguments rather than positional.

    :param body: Instance template representation as object according to
        https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
    :type body: dict
    :param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example when a client call times out, repeating the
        request with the same request_id will not create a new instance template
        again). It should be in UUID format as defined in RFC 4122.
    :type request_id: str
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: None
    """
    response = self.get_conn().instanceTemplates().insert(
        project=project_id,
        body=body,
        requestId=request_id
    ).execute(num_retries=self.num_retries)
    try:
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name)
python
{ "resource": "" }
q265986
GceHook.get_instance_group_manager
test
def get_instance_group_manager(self, zone, resource_id, project_id=None):
    """
    Retrieves Instance Group Manager by project_id, zone and resource_id.
    Must be called with keyword arguments rather than positional.

    :param zone: Google Cloud Platform zone where the Instance Group Manager exists
    :type zone: str
    :param resource_id: Name of the Instance Group Manager
    :type resource_id: str
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: Instance group manager representation as object according to
        https://cloud.google.com/compute/docs/reference/rest/beta/instanceGroupManagers
    :rtype: dict
    """
    response = self.get_conn().instanceGroupManagers().get(
        project=project_id,
        zone=zone,
        instanceGroupManager=resource_id
    ).execute(num_retries=self.num_retries)
    return response
python
{ "resource": "" }
q265987
GceHook.patch_instance_group_manager
test
def patch_instance_group_manager(self, zone, resource_id,
                                 body, request_id=None, project_id=None):
    """
    Patches Instance Group Manager with the specified body.
    Must be called with keyword arguments rather than positional.

    :param zone: Google Cloud Platform zone where the Instance Group Manager exists
    :type zone: str
    :param resource_id: Name of the Instance Group Manager
    :type resource_id: str
    :param body: Instance Group Manager representation as json-merge-patch object
        according to
        https://cloud.google.com/compute/docs/reference/rest/beta/instanceTemplates/patch
    :type body: dict
    :param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example when a client call times out, repeating the
        request with the same request_id will not create a new instance template
        again). It should be in UUID format as defined in RFC 4122.
    :type request_id: str
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: None
    """
    response = self.get_conn().instanceGroupManagers().patch(
        project=project_id,
        zone=zone,
        instanceGroupManager=resource_id,
        body=body,
        requestId=request_id
    ).execute(num_retries=self.num_retries)
    try:
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name,
                                         zone=zone)
python
{ "resource": "" }
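Using the same hypothetical hook as in the earlier sketch, a json-merge-patch body for this call might simply point the group at a new template; the template URL and request id below are placeholders.

import uuid

patch_body = {
    "instanceTemplate": "global/instanceTemplates/example-template-v2"
}

hook.patch_instance_group_manager(zone='europe-west1-b',
                                  resource_id='example-group-manager',
                                  body=patch_body,
                                  request_id=str(uuid.uuid4()),
                                  project_id='example-project')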
q265988
GceHook._wait_for_operation_to_complete
test
def _wait_for_operation_to_complete(self, project_id, operation_name, zone=None):
    """
    Waits for the named operation to complete - checks status of the async call.

    :param project_id: Google Cloud Platform project ID where the operation runs
    :type project_id: str
    :param operation_name: name of the operation
    :type operation_name: str
    :param zone: optional zone of the request (might be None for global operations)
    :type zone: str
    :return: None
    """
    service = self.get_conn()
    while True:
        if zone is None:
            # noinspection PyTypeChecker
            operation_response = self._check_global_operation_status(
                service, operation_name, project_id)
        else:
            # noinspection PyTypeChecker
            operation_response = self._check_zone_operation_status(
                service, operation_name, project_id, zone, self.num_retries)
        if operation_response.get("status") == GceOperationStatus.DONE:
            error = operation_response.get("error")
            if error:
                code = operation_response.get("httpErrorStatusCode")
                msg = operation_response.get("httpErrorMessage")
                # Extracting the errors list as string and trimming square braces
                error_msg = str(error.get("errors"))[1:-1]
                raise AirflowException("{} {}: ".format(code, msg) + error_msg)
            # No meaningful info to return from the response in case of success
            return
        time.sleep(TIME_TO_SLEEP_IN_SECONDS)
python
{ "resource": "" }
q265989
S3Hook.check_for_bucket
test
def check_for_bucket(self, bucket_name):
    """
    Check if bucket_name exists.

    :param bucket_name: the name of the bucket
    :type bucket_name: str
    """
    try:
        self.get_conn().head_bucket(Bucket=bucket_name)
        return True
    except ClientError as e:
        self.log.info(e.response["Error"]["Message"])
        return False
python
{ "resource": "" }
q265990
S3Hook.create_bucket
test
def create_bucket(self, bucket_name, region_name=None):
    """
    Creates an Amazon S3 bucket.

    :param bucket_name: The name of the bucket
    :type bucket_name: str
    :param region_name: The name of the aws region in which to create the bucket.
    :type region_name: str
    """
    s3_conn = self.get_conn()
    if not region_name:
        region_name = s3_conn.meta.region_name
    if region_name == 'us-east-1':
        self.get_conn().create_bucket(Bucket=bucket_name)
    else:
        self.get_conn().create_bucket(Bucket=bucket_name,
                                      CreateBucketConfiguration={
                                          'LocationConstraint': region_name
                                      })
python
{ "resource": "" }
q265991
S3Hook.check_for_prefix
test
def check_for_prefix(self, bucket_name, prefix, delimiter):
    """
    Checks that a prefix exists in a bucket

    :param bucket_name: the name of the bucket
    :type bucket_name: str
    :param prefix: a key prefix
    :type prefix: str
    :param delimiter: the delimiter marks key hierarchy.
    :type delimiter: str
    """
    prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
    prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
    previous_level = prefix_split[0]
    plist = self.list_prefixes(bucket_name, previous_level, delimiter)
    return False if plist is None else prefix in plist
python
{ "resource": "" }
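To make the prefix handling above concrete, a short sketch with a placeholder bucket and layout; the connection id and import path are the usual 1.10-era ones and are assumptions here.

from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='aws_default')
# 'logs/2019' is normalised to 'logs/2019/', split into parent 'logs/' and the
# candidate 'logs/2019/', then list_prefixes(bucket, 'logs/', '/') is checked
# for membership of 'logs/2019/'.
exists = hook.check_for_prefix(bucket_name='example-bucket',
                               prefix='logs/2019',
                               delimiter='/')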
q265992
S3Hook.list_prefixes
test
def list_prefixes(self, bucket_name, prefix='', delimiter='',
                  page_size=None, max_items=None):
    """
    Lists prefixes in a bucket under prefix

    :param bucket_name: the name of the bucket
    :type bucket_name: str
    :param prefix: a key prefix
    :type prefix: str
    :param delimiter: the delimiter marks key hierarchy.
    :type delimiter: str
    :param page_size: pagination size
    :type page_size: int
    :param max_items: maximum items to return
    :type max_items: int
    """
    config = {
        'PageSize': page_size,
        'MaxItems': max_items,
    }

    paginator = self.get_conn().get_paginator('list_objects_v2')
    response = paginator.paginate(Bucket=bucket_name,
                                  Prefix=prefix,
                                  Delimiter=delimiter,
                                  PaginationConfig=config)

    has_results = False
    prefixes = []
    for page in response:
        if 'CommonPrefixes' in page:
            has_results = True
            for p in page['CommonPrefixes']:
                prefixes.append(p['Prefix'])

    if has_results:
        return prefixes
python
{ "resource": "" }
q265993
S3Hook.list_keys
test
def list_keys(self, bucket_name, prefix='', delimiter='',
              page_size=None, max_items=None):
    """
    Lists keys in a bucket under prefix and not containing delimiter

    :param bucket_name: the name of the bucket
    :type bucket_name: str
    :param prefix: a key prefix
    :type prefix: str
    :param delimiter: the delimiter marks key hierarchy.
    :type delimiter: str
    :param page_size: pagination size
    :type page_size: int
    :param max_items: maximum items to return
    :type max_items: int
    """
    config = {
        'PageSize': page_size,
        'MaxItems': max_items,
    }

    paginator = self.get_conn().get_paginator('list_objects_v2')
    response = paginator.paginate(Bucket=bucket_name,
                                  Prefix=prefix,
                                  Delimiter=delimiter,
                                  PaginationConfig=config)

    has_results = False
    keys = []
    for page in response:
        if 'Contents' in page:
            has_results = True
            for k in page['Contents']:
                keys.append(k['Key'])

    if has_results:
        return keys
python
{ "resource": "" }
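A short usage sketch for the listing helper, with the same hypothetical hook; page_size and max_items are passed straight through to the boto3 paginator.

# Returns a list of key names, or None if nothing matched the prefix
keys = hook.list_keys(bucket_name='example-bucket',
                      prefix='data/2019/',
                      page_size=1000,
                      max_items=5000)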
q265994
S3Hook.check_for_key
test
def check_for_key(self, key, bucket_name=None):
    """
    Checks if a key exists in a bucket

    :param key: S3 key that will point to the file
    :type key: str
    :param bucket_name: Name of the bucket in which the file is stored
    :type bucket_name: str
    """
    if not bucket_name:
        (bucket_name, key) = self.parse_s3_url(key)

    try:
        self.get_conn().head_object(Bucket=bucket_name, Key=key)
        return True
    except ClientError as e:
        self.log.info(e.response["Error"]["Message"])
        return False
python
{ "resource": "" }
q265995
S3Hook.get_key
test
def get_key(self, key, bucket_name=None):
    """
    Returns a boto3.s3.Object

    :param key: the path to the key
    :type key: str
    :param bucket_name: the name of the bucket
    :type bucket_name: str
    """
    if not bucket_name:
        (bucket_name, key) = self.parse_s3_url(key)

    obj = self.get_resource_type('s3').Object(bucket_name, key)
    obj.load()
    return obj
python
{ "resource": "" }
q265996
S3Hook.read_key
test
def read_key(self, key, bucket_name=None):
    """
    Reads a key from S3

    :param key: S3 key that will point to the file
    :type key: str
    :param bucket_name: Name of the bucket in which the file is stored
    :type bucket_name: str
    """
    obj = self.get_key(key, bucket_name)
    return obj.get()['Body'].read().decode('utf-8')
python
{ "resource": "" }
q265997
S3Hook.select_key
test
def select_key(self, key, bucket_name=None,
               expression='SELECT * FROM S3Object',
               expression_type='SQL',
               input_serialization=None,
               output_serialization=None):
    """
    Reads a key with S3 Select.

    :param key: S3 key that will point to the file
    :type key: str
    :param bucket_name: Name of the bucket in which the file is stored
    :type bucket_name: str
    :param expression: S3 Select expression
    :type expression: str
    :param expression_type: S3 Select expression type
    :type expression_type: str
    :param input_serialization: S3 Select input data serialization format
    :type input_serialization: dict
    :param output_serialization: S3 Select output data serialization format
    :type output_serialization: dict
    :return: retrieved subset of original data by S3 Select
    :rtype: str

    .. seealso::
        For more details about S3 Select parameters:
        http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
    """
    if input_serialization is None:
        input_serialization = {'CSV': {}}
    if output_serialization is None:
        output_serialization = {'CSV': {}}
    if not bucket_name:
        (bucket_name, key) = self.parse_s3_url(key)

    response = self.get_conn().select_object_content(
        Bucket=bucket_name,
        Key=key,
        Expression=expression,
        ExpressionType=expression_type,
        InputSerialization=input_serialization,
        OutputSerialization=output_serialization)

    return ''.join(event['Records']['Payload'].decode('utf-8')
                   for event in response['Payload']
                   if 'Records' in event)
python
{ "resource": "" }
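A hedged example of calling select_key on a hypothetical gzipped CSV object, with the same hook as above; the serialization dicts follow the boto3 select_object_content format.

result = hook.select_key(
    key='s3://example-bucket/data/2019/01/report.csv.gz',
    expression="SELECT s._1 FROM S3Object s",
    input_serialization={'CSV': {}, 'CompressionType': 'GZIP'},
    output_serialization={'CSV': {}})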
q265998
S3Hook.check_for_wildcard_key
test
def check_for_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
    """
    Checks that a key matching a wildcard expression exists in a bucket

    :param wildcard_key: the path to the key
    :type wildcard_key: str
    :param bucket_name: the name of the bucket
    :type bucket_name: str
    :param delimiter: the delimiter marks key hierarchy
    :type delimiter: str
    """
    return self.get_wildcard_key(wildcard_key=wildcard_key,
                                 bucket_name=bucket_name,
                                 delimiter=delimiter) is not None
python
{ "resource": "" }
q265999
S3Hook.get_wildcard_key
test
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
    """
    Returns a boto3.s3.Object object matching the wildcard expression

    :param wildcard_key: the path to the key
    :type wildcard_key: str
    :param bucket_name: the name of the bucket
    :type bucket_name: str
    :param delimiter: the delimiter marks key hierarchy
    :type delimiter: str
    """
    if not bucket_name:
        (bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)

    prefix = re.split(r'[*]', wildcard_key, 1)[0]
    klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
    if klist:
        key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
        if key_matches:
            return self.get_key(key_matches[0], bucket_name)
python
{ "resource": "" }
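Finally, a sketch of the wildcard helpers with placeholder names and the same hypothetical hook: the part of the pattern before the first '*' is used as the listing prefix, and fnmatch filters the results.

pattern = 'data/2019/*/report.csv'
if hook.check_for_wildcard_key(pattern, bucket_name='example-bucket'):
    obj = hook.get_wildcard_key(pattern, bucket_name='example-bucket')
    # obj is a boto3 Object for the first matching key
    print(obj.key)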