| repo_name (4 classes) | method_name (3–72 chars) | method_code (87–3.59k chars) | method_summary (12–196 chars) | original_method_code (129–8.98k chars) | method_path (15–136 chars) |
|---|---|---|---|---|---|
apache/airflow | DingdingHook._get_endpoint | def _get_endpoint(self):
conn = self.get_connection(self.http_conn_id)
token = conn.password
if not token:
raise AirflowException('Dingding token is requests but get nothing, '
'check you conn_id configuration.')
return 'robot/send?access_to... | Get Dingding endpoint for sending message. | def _get_endpoint(self):
"""
Get Dingding endpoint for sending message.
"""
conn = self.get_connection(self.http_conn_id)
token = conn.password
if not token:
raise AirflowException('Dingding token is requests but get nothing, '
... | airflow/contrib/hooks/dingding_hook.py |
apache/airflow | DingdingHook.send | def send(self):
support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard']
if self.message_type not in support_type:
raise ValueError('DingdingWebhookHook only support {} '
'so far, but receive {}'.format(support_type, self.message_type))
data... | Send Dingding message | def send(self):
"""
Send Dingding message
"""
support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard']
if self.message_type not in support_type:
raise ValueError('DingdingWebhookHook only support {} '
'so far, but receive {}'.... | airflow/contrib/hooks/dingding_hook.py |
apache/airflow | _bind_parameters | def _bind_parameters(operation, parameters):
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
... | Helper method that binds parameters to a SQL query. | def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | _escape | def _escape(s):
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e | Helper method that escapes parameters to a SQL query. | def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | _bq_cast | def _bq_cast(string_field, bq_type):
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
return float(string_field)
elif bq_type == 'BOOLEAN':
if string_field not in ['true', 'false']:... | Helper method that casts a BigQuery row to the appropriate data types. This is useful because BigQuery returns all fields as strings. | def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | _validate_value | def _validate_value(key, value, expected_type):
if not isinstance(value, expected_type):
raise TypeError("{} argument must have a type {} not {}".format(
key, expected_type, type(value))) | function to check expected type and raise error if type is not correct | def _validate_value(key, value, expected_type):
""" function to check expected type and raise
error if type is not correct """
if not isinstance(value, expected_type):
raise TypeError("{} argument must have a type {} not {}".format(
key, expected_type, type(value))) | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryHook.table_exists | def table_exists(self, project_id, dataset_id, table_id):
service = self.get_service()
try:
service.tables().get(
projectId=project_id, datasetId=dataset_id,
tableId=table_id).execute(num_retries=self.num_retries)
return True
except HttpErr... | Checks for the existence of a table in Google BigQuery. | def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified proj... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryBaseCursor.create_empty_table | def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning=None,
cluster_fields=None,
lab... | Creates a new, empty table in the dataset. To create a view, which is defined by a SQL query, parse a dictionary to 'view' kwarg | def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning=None,
cluster_fields=None,
lab... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryBaseCursor.patch_table | def patch_table(self,
dataset_id,
table_id,
project_id=None,
description=None,
expiration_time=None,
external_data_configuration=None,
friendly_name=None,
                        label... | Patch information in an existing table. It only updates fields that are provided in the request object. | def patch_table(self,
dataset_id,
table_id,
project_id=None,
description=None,
expiration_time=None,
external_data_configuration=None,
friendly_name=None,
label... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryBaseCursor.cancel_query | def cancel_query(self):
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id,
self.running_job_id)
if self.location:
... | Cancel all started queries that have not yet completed | def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.proj... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryBaseCursor.run_table_delete | def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
... | Delete an existing table from the dataset; If the table does not exist, return an error unless ignore_if_missing is set to True. | def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryBaseCursor.run_table_upsert | def run_table_upsert(self, dataset_id, table_resource, project_id=None):
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(
projectId=project_id, dataset... | creates a new, empty table in the dataset; If the table already exists, update the existing table. Since BigQuery does not natively allow table upserts, this is not an atomic operation. | def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param d... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryBaseCursor.get_dataset | def get_dataset(self, dataset_id, project_id=None):
if not dataset_id or not isinstance(dataset_id, str):
raise ValueError("dataset_id argument must be provided and has "
"a type 'str'. You provided: {}".format(dataset_id))
dataset_project_id = project_id if pro... | Method returns dataset_resource if dataset exist and raised 404 error if dataset does not exist | def get_dataset(self, dataset_id, project_id=None):
"""
Method returns dataset_resource if dataset exist
and raised 404 error if dataset does not exist
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param project_id: The GCP Project ID
:type pr... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryBaseCursor.get_datasets_list | def get_datasets_list(self, project_id=None):
dataset_project_id = project_id if project_id else self.project_id
try:
datasets_list = self.service.datasets().list(
projectId=dataset_project_id).execute(num_retries=self.num_retries)['datasets']
self.log.info("Data... | Method returns full list of BigQuery datasets in the current project | def get_datasets_list(self, project_id=None):
"""
Method returns full list of BigQuery datasets in the current project
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud ... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryBaseCursor.insert_all | def insert_all(self, project_id, dataset_id, table_id,
rows, ignore_unknown_values=False,
skip_invalid_rows=False, fail_on_error=False):
dataset_project_id = project_id if project_id else self.project_id
body = {
"rows": rows,
"ignoreUnknown... | Method to stream data into BigQuery one record at a time without needing to run a load job | def insert_all(self, project_id, dataset_id, table_id,
rows, ignore_unknown_values=False,
skip_invalid_rows=False, fail_on_error=False):
"""
Method to stream data into BigQuery one record at a time without needing
to run a load job
.. seealso::
... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryCursor.execute | def execute(self, operation, parameters=None):
sql = _bind_parameters(operation,
parameters) if parameters else operation
self.job_id = self.run_query(sql) | Executes a BigQuery query, and returns the job ID. | def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: str
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
sql =... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryCursor.executemany | def executemany(self, operation, seq_of_parameters):
for parameters in seq_of_parameters:
self.execute(operation, parameters) | Execute a BigQuery query multiple times with different parameters. | def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: str
:param seq_of_parameters: List of dictionary parameters to substitute into the
que... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | BigQueryCursor.next | def next(self):
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (self.service.jobs().getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
... | Helper method for fetchone, which returns the next row from a buffer. If the buffer is empty, attempts to paginate through the result set for the next page, and load it into the buffer. | def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(se... | airflow/contrib/hooks/bigquery_hook.py |
apache/airflow | PostgresToGoogleCloudStorageOperator._query_postgres | def _query_postgres(self):
postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id)
conn = postgres.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql, self.parameters)
return cursor | Queries Postgres and returns a cursor to the results. | def _query_postgres(self):
"""
Queries Postgres and returns a cursor to the results.
"""
postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id)
conn = postgres.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql, self.parameters)
return curs... | airflow/contrib/operators/postgres_to_gcs_operator.py |
apache/airflow | _make_intermediate_dirs | def _make_intermediate_dirs(sftp_client, remote_directory):
if remote_directory == '/':
sftp_client.chdir('/')
return
if remote_directory == '':
return
try:
sftp_client.chdir(remote_directory)
except IOError:
dirname, basename = os.path.split(remote_directory.rstr... | Create all the intermediate directories in a remote host | def _make_intermediate_dirs(sftp_client, remote_directory):
"""
Create all the intermediate directories in a remote host
:param sftp_client: A Paramiko SFTP client.
:param remote_directory: Absolute Path of the directory containing the file
:return:
"""
if remote_directory == '/':
s... | airflow/contrib/operators/sftp_operator.py |
apache/airflow | SQSHook.create_queue | def create_queue(self, queue_name, attributes=None):
return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {}) | Create queue using connection object | def create_queue(self, queue_name, attributes=None):
"""
Create queue using connection object
:param queue_name: name of the queue.
:type queue_name: str
:param attributes: additional attributes for the queue (default: None)
For details of the attributes parameter se... | airflow/contrib/hooks/aws_sqs_hook.py |
apache/airflow | SQSHook.send_message | def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None):
return self.get_conn().send_message(QueueUrl=queue_url,
MessageBody=message_body,
DelaySeconds=delay_seconds,
... | Send message to the queue | def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None):
"""
Send message to the queue
:param queue_url: queue url
:type queue_url: str
:param message_body: the contents of the message
:type message_body: str
:param delay_seconds... | airflow/contrib/hooks/aws_sqs_hook.py |
apache/airflow | BaseTaskRunner.run_command | def run_command(self, run_with=None, join_args=False):
run_with = run_with or []
cmd = [" ".join(self._command)] if join_args else self._command
full_cmd = run_with + cmd
self.log.info('Running: %s', full_cmd)
proc = subprocess.Popen(
full_cmd,
stdout=sub... | Run the task command. | def run_command(self, run_with=None, join_args=False):
"""
Run the task command.
:param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
:type run_with: list
:param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run'... | airflow/task/task_runner/base_task_runner.py |
apache/airflow | BaseTaskRunner.on_finish | def on_finish(self):
if self._cfg_path and os.path.isfile(self._cfg_path):
if self.run_as_user:
subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
else:
os.remove(self._cfg_path) | A callback that should be called when this is done running. | def on_finish(self):
"""
A callback that should be called when this is done running.
"""
if self._cfg_path and os.path.isfile(self._cfg_path):
if self.run_as_user:
subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
else:
... | airflow/task/task_runner/base_task_runner.py |
apache/airflow | _main | def _main():
usage = "usage: nvd3.py [options]"
parser = OptionParser(usage=usage,
version=("python-nvd3 - Charts generator with "
"nvd3.js and d3.js"))
parser.add_option("-q", "--quiet",
action="store_false", dest="verb... | Parse options and process commands | def _main():
"""
Parse options and process commands
"""
# Parse arguments
usage = "usage: nvd3.py [options]"
parser = OptionParser(usage=usage,
version=("python-nvd3 - Charts generator with "
"nvd3.js and d3.js"))
parser.add_option... | airflow/_vendor/nvd3/NVD3Chart.py |
apache/airflow | NVD3Chart.buildhtmlheader | def buildhtmlheader(self):
self.htmlheader = ''
global _js_initialized
if '_js_initialized' not in globals() or not _js_initialized:
for css in self.header_css:
self.htmlheader += css
for js in self.header_js:
self.htmlheader += js | generate HTML header content | def buildhtmlheader(self):
"""generate HTML header content"""
self.htmlheader = ''
# If the JavaScript assets have already been injected, don't bother re-sourcing them.
global _js_initialized
if '_js_initialized' not in globals() or not _js_initialized:
for css in sel... | airflow/_vendor/nvd3/NVD3Chart.py |
apache/airflow | NVD3Chart.buildcontainer | def buildcontainer(self):
if self.container:
return
if self.width:
if self.width[-1] != '%':
self.style += 'width:%spx;' % self.width
else:
self.style += 'width:%s;' % self.width
if self.height:
if self.hei... | generate HTML div | def buildcontainer(self):
"""generate HTML div"""
if self.container:
return
# Create SVG div with style
if self.width:
if self.width[-1] != '%':
self.style += 'width:%spx;' % self.width
else:
self.style += 'width:%s;' %... | airflow/_vendor/nvd3/NVD3Chart.py |
apache/airflow | NVD3Chart.buildjschart | def buildjschart(self):
self.jschart = ''
if self.tooltip_condition_string == '':
self.tooltip_condition_string = 'var y = String(graph.point.y);\n'
self.series_js = json.dumps(self.series) | generate javascript code for the chart | def buildjschart(self):
"""generate javascript code for the chart"""
self.jschart = ''
# add custom tooltip string in jschart
# default condition (if build_custom_tooltip is not called explicitly with date_flag=True)
if self.tooltip_condition_string == '':
self.toolt... | airflow/_vendor/nvd3/NVD3Chart.py |
apache/airflow | NVD3Chart.create_x_axis | def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False):
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
if format == 'AM_PM':
axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d))... | Create X-axis | def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False):
"""Create X-axis"""
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
if format == 'AM_PM':
axis['tickFormat'] = "function(d) { r... | airflow/_vendor/nvd3/NVD3Chart.py |
apache/airflow | NVD3Chart.create_y_axis | def create_y_axis(self, name, label=None, format=None, custom_format=False):
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
axis['tickFormat'] = "d3.format(',%s')" % format
if label:
axis['axisLabel'] = "'" + label + ... | Create Y-axis | def create_y_axis(self, name, label=None, format=None, custom_format=False):
"""
Create Y-axis
"""
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
axis['tickFormat'] = "d3.format(',%s')" % format
if label:
... | airflow/_vendor/nvd3/NVD3Chart.py |
apache/airflow | action_logging | def action_logging(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with create_session() as session:
if g.user.is_anonymous:
user = 'anonymous'
else:
user = g.user.username
log = Log(
event=f.__name__,
... | Decorator to log user actions | def action_logging(f):
"""
Decorator to log user actions
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
with create_session() as session:
if g.user.is_anonymous:
user = 'anonymous'
else:
user = g.user.username
log =... | airflow/www/decorators.py |
apache/airflow | gzipped | def gzipped(f):
@functools.wraps(f)
def view_func(*args, **kwargs):
@after_this_request
def zipper(response):
accept_encoding = request.headers.get('Accept-Encoding', '')
if 'gzip' not in accept_encoding.lower():
return response
response.dire... | Decorator to make a view compressed | def gzipped(f):
"""
Decorator to make a view compressed
"""
@functools.wraps(f)
def view_func(*args, **kwargs):
@after_this_request
def zipper(response):
accept_encoding = request.headers.get('Accept-Encoding', '')
if 'gzip' not in accept_encoding.lower():
... | airflow/www/decorators.py |
apache/airflow | DagModel.create_dagrun | def create_dagrun(self,
run_id,
state,
execution_date,
start_date=None,
external_trigger=False,
conf=None,
session=None):
return self.get_dag().create_dagrun(... | Creates a dag run from this dag including the tasks associated with this dag. | def create_dagrun(self,
run_id,
state,
execution_date,
start_date=None,
external_trigger=False,
conf=None,
session=None):
"""
Creates a dag run from t... | airflow/models/dag.py |
apache/airflow | SQSPublishOperator.execute | def execute(self, context):
hook = SQSHook(aws_conn_id=self.aws_conn_id)
result = hook.send_message(queue_url=self.sqs_queue,
message_body=self.message_content,
delay_seconds=self.delay_seconds,
mes... | Publish the message to SQS queue | def execute(self, context):
"""
Publish the message to SQS queue
:param context: the context object
:type context: dict
:return: dict with information about the message sent
For details of the returned dict see :py:meth:`botocore.client.SQS.send_message`
:rty... | airflow/contrib/operators/aws_sqs_publish_operator.py |
apache/airflow | json_response | def json_response(obj):
return Response(
response=json.dumps(
obj, indent=4, cls=AirflowJsonEncoder),
status=200,
mimetype="application/json") | returns a json response from a json serializable python object | def json_response(obj):
"""
returns a json response from a json serializable python object
"""
return Response(
response=json.dumps(
obj, indent=4, cls=AirflowJsonEncoder),
status=200,
mimetype="application/json") | airflow/www/utils.py |
apache/airflow | open_maybe_zipped | def open_maybe_zipped(f, mode='r'):
_, archive, filename = ZIP_REGEX.search(f).groups()
if archive and zipfile.is_zipfile(archive):
return zipfile.ZipFile(archive, mode=mode).open(filename)
else:
return io.open(f, mode=mode) | Opens the given file. If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive, opening the file inside the archive. | def open_maybe_zipped(f, mode='r'):
"""
Opens the given file. If the path contains a folder with a .zip suffix, then
the folder is treated as a zip archive, opening the file inside the archive.
:return: a file object, as in `open`, or as in `ZipFile.open`.
"""
_, archive, filename = ZIP_REGEX.... | airflow/www/utils.py |
apache/airflow | make_cache_key | def make_cache_key(*args, **kwargs):
path = request.path
args = str(hash(frozenset(request.args.items())))
return (path + args).encode('ascii', 'ignore') | Used by cache to get a unique key per URL | def make_cache_key(*args, **kwargs):
"""
Used by cache to get a unique key per URL
"""
path = request.path
args = str(hash(frozenset(request.args.items())))
return (path + args).encode('ascii', 'ignore') | airflow/www/utils.py |
apache/airflow | CloudVideoIntelligenceHook.annotate_video | def annotate_video(
self,
input_uri=None,
input_content=None,
features=None,
video_context=None,
output_uri=None,
location=None,
retry=None,
timeout=None,
metadata=None,
):
client = self.get_conn()
return client.annotate... | Performs video annotation. | def annotate_video(
self,
input_uri=None,
input_content=None,
features=None,
video_context=None,
output_uri=None,
location=None,
retry=None,
timeout=None,
metadata=None,
):
"""
Performs video annotation.
:param ... | airflow/contrib/hooks/gcp_video_intelligence_hook.py |
apache/airflow | OpsgenieAlertHook._get_api_key | def _get_api_key(self):
conn = self.get_connection(self.http_conn_id)
api_key = conn.password
if not api_key:
raise AirflowException('Opsgenie API Key is required for this hook, '
'please check your conn_id configuration.')
return api_key | Get Opsgenie api_key for creating alert | def _get_api_key(self):
"""
Get Opsgenie api_key for creating alert
"""
conn = self.get_connection(self.http_conn_id)
api_key = conn.password
if not api_key:
raise AirflowException('Opsgenie API Key is required for this hook, '
... | airflow/contrib/hooks/opsgenie_alert_hook.py |
apache/airflow | OpsgenieAlertHook.get_conn | def get_conn(self, headers=None):
conn = self.get_connection(self.http_conn_id)
self.base_url = conn.host if conn.host else 'https://api.opsgenie.com'
session = requests.Session()
if headers:
session.headers.update(headers)
return session | Overwrite HttpHook get_conn because this hook just needs base_url and headers, and does not need generic params | def get_conn(self, headers=None):
"""
Overwrite HttpHook get_conn because this hook just needs base_url
and headers, and does not need generic params
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
"""
conn = self.get_c... | airflow/contrib/hooks/opsgenie_alert_hook.py |
apache/airflow | OpsgenieAlertHook.execute | def execute(self, payload={}):
api_key = self._get_api_key()
return self.run(endpoint='v2/alerts',
data=json.dumps(payload),
headers={'Content-Type': 'application/json',
'Authorization': 'GenieKey %s' % api_key}) | Execute the Opsgenie Alert call | def execute(self, payload={}):
"""
Execute the Opsgenie Alert call
:param payload: Opsgenie API Create Alert payload values
See https://docs.opsgenie.com/docs/alert-api#section-create-alert
:type payload: dict
"""
api_key = self._get_api_key()
return ... | airflow/contrib/hooks/opsgenie_alert_hook.py |
apache/airflow | OpsgenieAlertOperator._build_opsgenie_payload | def _build_opsgenie_payload(self):
payload = {}
for key in [
"message", "alias", "description", "responders",
"visibleTo", "actions", "tags", "details", "entity",
"source", "priority", "user", "note"
]:
val = getattr(self, key)
if val:... | Construct the Opsgenie JSON payload. All relevant parameters are combined here to a valid Opsgenie JSON payload. | def _build_opsgenie_payload(self):
"""
Construct the Opsgenie JSON payload. All relevant parameters are combined here
to a valid Opsgenie JSON payload.
:return: Opsgenie payload (dict) to send
"""
payload = {}
for key in [
"message", "alias", "descri... | airflow/contrib/operators/opsgenie_alert_operator.py |
apache/airflow | OpsgenieAlertOperator.execute | def execute(self, context):
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.execute(self._build_opsgenie_payload()) | Call the OpsgenieAlertHook to post message | def execute(self, context):
"""
Call the OpsgenieAlertHook to post message
"""
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.execute(self._build_opsgenie_payload()) | airflow/contrib/operators/opsgenie_alert_operator.py |
apache/airflow | AWSAthenaHook.get_conn | def get_conn(self):
if not self.conn:
self.conn = self.get_client_type('athena')
return self.conn | check if aws conn exists already or create one and return it | def get_conn(self):
"""
check if aws conn exists already or create one and return it
:return: boto3 session
"""
if not self.conn:
self.conn = self.get_client_type('athena')
return self.conn | airflow/contrib/hooks/aws_athena_hook.py |
apache/airflow | AWSAthenaHook.run_query | def run_query(self, query, query_context, result_configuration, client_request_token=None):
response = self.conn.start_query_execution(QueryString=query,
ClientRequestToken=client_request_token,
QueryExecutionC... | Run Presto query on athena with provided config and return submitted query_execution_id | def run_query(self, query, query_context, result_configuration, client_request_token=None):
"""
Run Presto query on athena with provided config and return submitted query_execution_id
:param query: Presto query to run
:type query: str
:param query_context: Context in which query... | airflow/contrib/hooks/aws_athena_hook.py |
apache/airflow | AWSAthenaHook.check_query_status | def check_query_status(self, query_execution_id):
response = self.conn.get_query_execution(QueryExecutionId=query_execution_id)
state = None
try:
state = response['QueryExecution']['Status']['State']
except Exception as ex:
self.log.error('Exception while getting ... | Fetch the status of submitted athena query. | def check_query_status(self, query_execution_id):
"""
Fetch the status of submitted athena query. Returns None or one of valid query states.
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:return: str
"""
response = self.con... | airflow/contrib/hooks/aws_athena_hook.py |
apache/airflow | AWSAthenaHook.poll_query_status | def poll_query_status(self, query_execution_id, max_tries=None):
try_number = 1
final_query_state = None
while True:
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.info('Trial {try_number}: Invalid query state.... | Poll the status of submitted athena query until query state reaches final state. | def poll_query_status(self, query_execution_id, max_tries=None):
"""
Poll the status of submitted athena query until query state reaches final state.
Returns one of the final states
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:pa... | airflow/contrib/hooks/aws_athena_hook.py |
apache/airflow | ZendeskHook.__handle_rate_limit_exception | def __handle_rate_limit_exception(self, rate_limit_exception):
retry_after = int(
rate_limit_exception.response.headers.get('Retry-After', 60))
self.log.info(
"Hit Zendesk API rate limit. Pausing for %s seconds",
retry_after
)
time.sleep(retry_after) | Sleep for the time specified in the exception. If not specified, wait for 60 seconds. | def __handle_rate_limit_exception(self, rate_limit_exception):
"""
Sleep for the time specified in the exception. If not specified, wait
for 60 seconds.
"""
retry_after = int(
rate_limit_exception.response.headers.get('Retry-After', 60))
self.log.info(
... | airflow/hooks/zendesk_hook.py |
apache/airflow | ZendeskHook.call | def call(self, path, query=None, get_all_pages=True, side_loading=False):
zendesk = self.get_conn()
first_request_successful = False
while not first_request_successful:
try:
results = zendesk.call(path, query)
first_request_successful = True
... | Call Zendesk API and return results | def call(self, path, query=None, get_all_pages=True, side_loading=False):
"""
Call Zendesk API and return results
:param path: The Zendesk API to call
:param query: Query parameters
:param get_all_pages: Accumulate results over all pages before
returning. Due to s... | airflow/hooks/zendesk_hook.py |
apache/airflow | AwsGlueCatalogHook.get_partitions | def get_partitions(self,
database_name,
table_name,
expression='',
page_size=None,
max_items=None):
config = {
'PageSize': page_size,
'MaxItems': max_items,
}
... | Retrieves the partition values for a table. | def get_partitions(self,
database_name,
table_name,
expression='',
page_size=None,
max_items=None):
"""
Retrieves the partition values for a table.
:param database_name: The name o... | airflow/contrib/hooks/aws_glue_catalog_hook.py |
apache/airflow | AwsGlueCatalogHook.get_table | def get_table(self, database_name, table_name):
result = self.get_conn().get_table(DatabaseName=database_name, Name=table_name)
return result['Table'] | Get the information of the table | def get_table(self, database_name, table_name):
"""
Get the information of the table
:param database_name: Name of hive database (schema) @table belongs to
:type database_name: str
:param table_name: Name of hive table
:type table_name: str
:rtype: dict
... | airflow/contrib/hooks/aws_glue_catalog_hook.py |
apache/airflow | AwsGlueCatalogHook.get_table_location | def get_table_location(self, database_name, table_name):
table = self.get_table(database_name, table_name)
return table['StorageDescriptor']['Location'] | Get the physical location of the table | def get_table_location(self, database_name, table_name):
"""
Get the physical location of the table
:param database_name: Name of hive database (schema) @table belongs to
:type database_name: str
:param table_name: Name of hive table
:type table_name: str
:return... | airflow/contrib/hooks/aws_glue_catalog_hook.py |
apache/airflow | RedshiftHook.cluster_status | def cluster_status(self, cluster_identifier):
conn = self.get_conn()
try:
response = conn.describe_clusters(
ClusterIdentifier=cluster_identifier)['Clusters']
return response[0]['ClusterStatus'] if response else None
except conn.exceptions.ClusterNotFoundF... | Return status of a cluster | def cluster_status(self, cluster_identifier):
"""
Return status of a cluster
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
"""
conn = self.get_conn()
try:
response = conn.describe_clusters(
Clu... | airflow/contrib/hooks/redshift_hook.py |
apache/airflow | RedshiftHook.delete_cluster | def delete_cluster(
self,
cluster_identifier,
skip_final_cluster_snapshot=True,
final_cluster_snapshot_identifier=''):
response = self.get_conn().delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=skip_final_clus... | Delete a cluster and optionally create a snapshot | def delete_cluster(
self,
cluster_identifier,
skip_final_cluster_snapshot=True,
final_cluster_snapshot_identifier=''):
"""
Delete a cluster and optionally create a snapshot
:param cluster_identifier: unique identifier of a cluster
:type cl... | airflow/contrib/hooks/redshift_hook.py |
apache/airflow | RedshiftHook.describe_cluster_snapshots | def describe_cluster_snapshots(self, cluster_identifier):
response = self.get_conn().describe_cluster_snapshots(
ClusterIdentifier=cluster_identifier
)
if 'Snapshots' not in response:
return None
snapshots = response['Snapshots']
snapshots = filter(lambda ... | Gets a list of snapshots for a cluster | def describe_cluster_snapshots(self, cluster_identifier):
"""
Gets a list of snapshots for a cluster
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
"""
response = self.get_conn().describe_cluster_snapshots(
ClusterIden... | airflow/contrib/hooks/redshift_hook.py |
apache/airflow | RedshiftHook.restore_from_cluster_snapshot | def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier):
response = self.get_conn().restore_from_cluster_snapshot(
ClusterIdentifier=cluster_identifier,
SnapshotIdentifier=snapshot_identifier
)
return response['Cluster'] if response['Cluster'] els... | Restores a cluster from its snapshot | def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier):
"""
Restores a cluster from its snapshot
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
:param snapshot_identifier: unique identifier for a snapshot of a cl... | airflow/contrib/hooks/redshift_hook.py |
apache/airflow | RedshiftHook.create_cluster_snapshot | def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier):
response = self.get_conn().create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
)
return response['Snapshot'] if response['Snapshot'] else None | Creates a snapshot of a cluster | def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier):
"""
Creates a snapshot of a cluster
:param snapshot_identifier: unique identifier for a snapshot of a cluster
:type snapshot_identifier: str
:param cluster_identifier: unique identifier of a cluster
... | airflow/contrib/hooks/redshift_hook.py |
apache/airflow | SlackAPIOperator.execute | def execute(self, **kwargs):
if not self.api_params:
self.construct_api_call_params()
slack = SlackHook(token=self.token, slack_conn_id=self.slack_conn_id)
slack.call(self.method, self.api_params) | SlackAPIOperator calls will not fail even if the call is not unsuccessful. It should not prevent a DAG from completing in success | def execute(self, **kwargs):
"""
SlackAPIOperator calls will not fail even if the call is not unsuccessful.
It should not prevent a DAG from completing in success
"""
if not self.api_params:
self.construct_api_call_params()
slack = SlackHook(token=self.token, ... | airflow/operators/slack_operator.py |
apache/airflow | HdfsSensor.filter_for_filesize | def filter_for_filesize(result, size=None):
if size:
log = LoggingMixin().log
log.debug(
'Filtering for file size >= %s in files: %s',
size, map(lambda x: x['path'], result)
)
size *= settings.MEGABYTE
result = [x for x ... | Will test the filepath result and test if its size is at least self.filesize | def filter_for_filesize(result, size=None):
"""
Will test the filepath result and test if its size is at least self.filesize
:param result: a list of dicts returned by Snakebite ls
:param size: the file size in MB a file should be at least to trigger True
:return: (bool) dependi... | airflow/sensors/hdfs_sensor.py |
apache/airflow | HdfsSensor.filter_for_ignored_ext | def filter_for_ignored_ext(result, ignored_ext, ignore_copying):
if ignore_copying:
log = LoggingMixin().log
regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext)
ignored_extensions_regex = re.compile(regex_builder)
log.debug(
'Filtering result f... | Will filter if instructed to do so the result to remove matching criteria | def filter_for_ignored_ext(result, ignored_ext, ignore_copying):
"""
Will filter if instructed to do so the result to remove matching criteria
:param result: list of dicts returned by Snakebite ls
:type result: list[dict]
:param ignored_ext: list of ignored extensions
:t... | airflow/sensors/hdfs_sensor.py |
apache/airflow | MongoToS3Operator.execute | def execute(self, context):
s3_conn = S3Hook(self.s3_conn_id)
if self.is_pipeline:
results = MongoHook(self.mongo_conn_id).aggregate(
mongo_collection=self.mongo_collection,
aggregate_query=self.mongo_query,
mongo_db=self.mongo_db
... | Executed by task_instance at runtime | def execute(self, context):
"""
Executed by task_instance at runtime
"""
s3_conn = S3Hook(self.s3_conn_id)
# Grab collection and execute query according to whether or not it is a pipeline
if self.is_pipeline:
results = MongoHook(self.mongo_conn_id).aggregate(... | airflow/contrib/operators/mongo_to_s3.py |
apache/airflow | get_pool | def get_pool(name, session=None):
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
pool = session.query(Pool).filter_by(pool=name).first()
if pool is None:
raise PoolNotFound("Pool '%s' doesn't exist" % name)
return pool | Get pool by a given name. | def get_pool(name, session=None):
"""Get pool by a given name."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
pool = session.query(Pool).filter_by(pool=name).first()
if pool is None:
raise PoolNotFound("Pool '%s' doesn't exist" % name)
re... | airflow/api/common/experimental/pool.py |
apache/airflow | create_pool | def create_pool(name, slots, description, session=None):
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
try:
slots = int(slots)
except ValueError:
raise AirflowBadRequest("Bad value for `slots`: %s" % slots)
session.expire_on_commit = Fa... | Create a pool with a given parameters. | def create_pool(name, slots, description, session=None):
"""Create a pool with a given parameters."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
try:
slots = int(slots)
except ValueError:
raise AirflowBadRequest("Bad value for `slots`... | airflow/api/common/experimental/pool.py |
apache/airflow | delete_pool | def delete_pool(name, session=None):
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
pool = session.query(Pool).filter_by(pool=name).first()
if pool is None:
raise PoolNotFound("Pool '%s' doesn't exist" % name)
session.delete(pool)
session.co... | Delete pool by a given name. | def delete_pool(name, session=None):
"""Delete pool by a given name."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
pool = session.query(Pool).filter_by(pool=name).first()
if pool is None:
raise PoolNotFound("Pool '%s' doesn't exist" % name)
... | airflow/api/common/experimental/pool.py |
apache/airflow | GKEClusterHook._dict_to_proto | def _dict_to_proto(py_dict, proto):
dict_json_str = json.dumps(py_dict)
return json_format.Parse(dict_json_str, proto) | Converts a python dictionary to the proto supplied | def _dict_to_proto(py_dict, proto):
"""
Converts a python dictionary to the proto supplied
:param py_dict: The dictionary to convert
:type py_dict: dict
:param proto: The proto object to merge with dictionary
:type proto: protobuf
:return: A parsed python diction... | airflow/contrib/hooks/gcp_container_hook.py |
apache/airflow | GKEClusterHook.wait_for_operation | def wait_for_operation(self, operation, project_id=None):
self.log.info("Waiting for OPERATION_NAME %s", operation.name)
time.sleep(OPERATIONAL_POLL_INTERVAL)
while operation.status != Operation.Status.DONE:
if operation.status == Operation.Status.RUNNING or operation.status == \
... | Given an operation, continuously fetches the status from Google Cloud until either completion or an error occurring | def wait_for_operation(self, operation, project_id=None):
"""
Given an operation, continuously fetches the status from Google Cloud until either
completion or an error occurring
:param operation: The Operation to wait for
:type operation: google.cloud.container_V1.gapic.enums.Op... | airflow/contrib/hooks/gcp_container_hook.py |
apache/airflow | GKEClusterHook.get_operation | def get_operation(self, operation_name, project_id=None):
return self.get_client().get_operation(project_id=project_id or self.project_id,
zone=self.location,
operation_id=operation_name) | Fetches the operation from Google Cloud | def get_operation(self, operation_name, project_id=None):
"""
Fetches the operation from Google Cloud
:param operation_name: Name of operation to fetch
:type operation_name: str
:param project_id: Google Cloud Platform project ID
:type project_id: str
:return: Th... | airflow/contrib/hooks/gcp_container_hook.py |
apache/airflow | GKEClusterHook._append_label | def _append_label(cluster_proto, key, val):
val = val.replace('.', '-').replace('+', '-')
cluster_proto.resource_labels.update({key: val})
return cluster_proto | Append labels to provided Cluster Protobuf Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current airflow version string follows semantic versioning | def _append_label(cluster_proto, key, val):
"""
Append labels to provided Cluster Protobuf
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param cluster_proto: The proto to append resource_labe... | airflow/contrib/hooks/gcp_container_hook.py |
apache/airflow | GKEClusterHook.create_cluster | def create_cluster(self, cluster, project_id=None, retry=DEFAULT, timeout=DEFAULT):
if isinstance(cluster, dict):
cluster_proto = Cluster()
cluster = self._dict_to_proto(py_dict=cluster, proto=cluster_proto)
elif not isinstance(cluster, Cluster):
raise AirflowExceptio... | Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. | def create_cluster(self, cluster, project_id=None, retry=DEFAULT, timeout=DEFAULT):
"""
Creates a cluster, consisting of the specified number and type of Google Compute
Engine instances.
:param cluster: A Cluster protobuf or dict. If dict is provided, it must
be of the same ... | airflow/contrib/hooks/gcp_container_hook.py |
apache/airflow | GKEClusterHook.get_cluster | def get_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT):
self.log.info(
"Fetching cluster (project_id=%s, zone=%s, cluster_name=%s)",
project_id or self.project_id, self.location, name
)
return self.get_client().get_cluster(project_id=project_id or s... | Gets details of specified cluster | def get_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT):
"""
Gets details of specified cluster
:param name: The name of the cluster to retrieve
:type name: str
:param project_id: Google Cloud Platform project ID
:type project_id: str
:param r... | airflow/contrib/hooks/gcp_container_hook.py |
apache/airflow | DiscordWebhookHook._get_webhook_endpoint | def _get_webhook_endpoint(self, http_conn_id, webhook_endpoint):
if webhook_endpoint:
endpoint = webhook_endpoint
elif http_conn_id:
conn = self.get_connection(http_conn_id)
extra = conn.extra_dejson
endpoint = extra.get('webhook_endpoint', '')
els... | Given a Discord http_conn_id, return the default webhook endpoint or override if a webhook_endpoint is manually supplied. | def _get_webhook_endpoint(self, http_conn_id, webhook_endpoint):
"""
Given a Discord http_conn_id, return the default webhook endpoint or override if a
webhook_endpoint is manually supplied.
:param http_conn_id: The provided connection ID
:param webhook_endpoint: The manually pr... | airflow/contrib/hooks/discord_webhook_hook.py |
apache/airflow | DiscordWebhookHook._build_discord_payload | def _build_discord_payload(self):
payload = {}
if self.username:
payload['username'] = self.username
if self.avatar_url:
payload['avatar_url'] = self.avatar_url
payload['tts'] = self.tts
if len(self.message) <= 2000:
payload['content'] = sel... | Construct the Discord JSON payload. All relevant parameters are combined here to a valid Discord JSON payload. | def _build_discord_payload(self):
"""
Construct the Discord JSON payload. All relevant parameters are combined here
to a valid Discord JSON payload.
:return: Discord payload (str) to send
"""
payload = {}
if self.username:
payload['username'] = self.... | airflow/contrib/hooks/discord_webhook_hook.py |
apache/airflow | DiscordWebhookHook.execute | def execute(self):
proxies = {}
if self.proxy:
proxies = {'https': self.proxy}
discord_payload = self._build_discord_payload()
self.run(endpoint=self.webhook_endpoint,
data=discord_payload,
headers={'Content-type': 'application... | Execute the Discord webhook call | def execute(self):
"""
Execute the Discord webhook call
"""
proxies = {}
if self.proxy:
# we only need https proxy for Discord
proxies = {'https': self.proxy}
discord_payload = self._build_discord_payload()
self.run(endpoint=self.webhook_... | airflow/contrib/hooks/discord_webhook_hook.py |
apache/airflow | GoogleCloudKMSHook.encrypt | def encrypt(self, key_name, plaintext, authenticated_data=None):
keys = self.get_conn().projects().locations().keyRings().cryptoKeys()
body = {'plaintext': _b64encode(plaintext)}
if authenticated_data:
body['additionalAuthenticatedData'] = _b64encode(authenticated_data)
requ... | Encrypts a plaintext message using Google Cloud KMS. | def encrypt(self, key_name, plaintext, authenticated_data=None):
"""
Encrypts a plaintext message using Google Cloud KMS.
:param key_name: The Resource Name for the key (or key version)
to be used for encyption. Of the form
``projects/*/location... | airflow/contrib/hooks/gcp_kms_hook.py |
apache/airflow | SqoopHook.import_table | def import_table(self, table, target_dir=None, append=False, file_type="text",
columns=None, split_by=None, where=None, direct=False,
driver=None, extra_import_options=None):
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct,
... | Imports table from remote location to target dir. Arguments are copies of direct sqoop command line arguments | def import_table(self, table, target_dir=None, append=False, file_type="text",
columns=None, split_by=None, where=None, direct=False,
driver=None, extra_import_options=None):
"""
Imports table from remote location to target dir. Arguments are
copies of d... | airflow/contrib/hooks/sqoop_hook.py |
apache/airflow | SqoopHook.import_query | def import_query(self, query, target_dir, append=False, file_type="text",
split_by=None, direct=None, driver=None, extra_import_options=None):
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct,
driver, extra_import_options)
cmd += ... | Imports a specific query from the rdbms to hdfs | def import_query(self, query, target_dir, append=False, file_type="text",
split_by=None, direct=None, driver=None, extra_import_options=None):
"""
Imports a specific query from the rdbms to hdfs
:param query: Free format query to run
:param target_dir: HDFS destinat... | airflow/contrib/hooks/sqoop_hook.py |
apache/airflow | SqoopHook.export_table | def export_table(self, table, export_dir, input_null_string,
input_null_non_string, staging_table,
clear_staging_table, enclosed_by,
escaped_by, input_fields_terminated_by,
input_lines_terminated_by,
input_optionall... | Exports Hive table to remote location. Arguments are copies of direct sqoop command line Arguments | def export_table(self, table, export_dir, input_null_string,
input_null_non_string, staging_table,
clear_staging_table, enclosed_by,
escaped_by, input_fields_terminated_by,
input_lines_terminated_by,
input_optionall... | airflow/contrib/hooks/sqoop_hook.py |
apache/airflow | GCPTextToSpeechHook.get_conn | def get_conn(self):
if not self._client:
self._client = TextToSpeechClient(credentials=self._get_credentials())
return self._client | Retrieves connection to Cloud Text to Speech. | def get_conn(self):
"""
Retrieves connection to Cloud Text to Speech.
:return: Google Cloud Text to Speech client object.
:rtype: google.cloud.texttospeech_v1.TextToSpeechClient
"""
if not self._client:
self._client = TextToSpeechClient(credentials=self._get_... | airflow/contrib/hooks/gcp_text_to_speech_hook.py |
apache/airflow | GCPTextToSpeechHook.synthesize_speech | def synthesize_speech(self, input_data, voice, audio_config, retry=None, timeout=None):
client = self.get_conn()
self.log.info("Synthesizing input: %s" % input_data)
return client.synthesize_speech(
input_=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeo... | Synthesizes text input | def synthesize_speech(self, input_data, voice, audio_config, retry=None, timeout=None):
"""
Synthesizes text input
:param input_data: text input to be synthesized. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttos... | airflow/contrib/hooks/gcp_text_to_speech_hook.py |
apache/airflow | S3TaskHandler.close | def close(self):
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relati... | Close and upload local log file to remote storage S3. | def close(self):
"""
Close and upload local log file to remote storage S3.
"""
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# ... | airflow/utils/log/s3_task_handler.py |
apache/airflow | WorkerConfiguration._get_init_containers | def _get_init_containers(self):
if self.kube_config.dags_volume_claim or \
self.kube_config.dags_volume_host or self.kube_config.dags_in_image:
return []
init_environment = [{
'name': 'GIT_SYNC_REPO',
'value': self.kube_config.git_repo
... | When using git to retrieve the DAGs, use the GitSync Init Container | def _get_init_containers(self):
"""When using git to retrieve the DAGs, use the GitSync Init Container"""
# If we're using volume claims to mount the dags, no init container is needed
if self.kube_config.dags_volume_claim or \
self.kube_config.dags_volume_host or self.kube_config.dags... | airflow/contrib/kubernetes/worker_configuration.py |
apache/airflow | WorkerConfiguration._get_environment | def _get_environment(self):
env = {}
for env_var_name, env_var_val in six.iteritems(self.kube_config.kube_env_vars):
env[env_var_name] = env_var_val
env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor"
if self.kube_config.airflow_configmap:
env['AIRFLOW_HOME'] = se... | Defines any necessary environment variables for the pod executor | def _get_environment(self):
"""Defines any necessary environment variables for the pod executor"""
env = {}
for env_var_name, env_var_val in six.iteritems(self.kube_config.kube_env_vars):
env[env_var_name] = env_var_val
env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor"
... | airflow/contrib/kubernetes/worker_configuration.py |
apache/airflow | WorkerConfiguration._get_secrets | def _get_secrets(self):
worker_secrets = []
for env_var_name, obj_key_pair in six.iteritems(self.kube_config.kube_secrets):
k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=')
worker_secrets.append(
Secret('env', env_var_name, k8s_secret_obj, k8s_secret_key)... | Defines any necessary secrets for the pod executor | def _get_secrets(self):
"""Defines any necessary secrets for the pod executor"""
worker_secrets = []
for env_var_name, obj_key_pair in six.iteritems(self.kube_config.kube_secrets):
k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=')
worker_secrets.append(
... | airflow/contrib/kubernetes/worker_configuration.py |
apache/airflow | WorkerConfiguration._get_security_context | def _get_security_context(self):
security_context = {}
if self.kube_config.worker_run_as_user:
security_context['runAsUser'] = self.kube_config.worker_run_as_user
if self.kube_config.worker_fs_group:
security_context['fsGroup'] = self.kube_config.worker_fs_group
... | Defines the security context | def _get_security_context(self):
"""Defines the security context"""
security_context = {}
if self.kube_config.worker_run_as_user:
security_context['runAsUser'] = self.kube_config.worker_run_as_user
if self.kube_config.worker_fs_group:
security_context['fsGroup']... | airflow/contrib/kubernetes/worker_configuration.py |
apache/airflow | QuboleHook.get_extra_links | def get_extra_links(self, operator, dttm):
conn = BaseHook.get_connection(operator.kwargs['qubole_conn_id'])
if conn and conn.host:
host = re.sub(r'api$', 'v2/analyze?command_id=', conn.host)
else:
host = 'https://api.qubole.com/v2/analyze?command_id='
ti = TaskI... | Get link to qubole command result page. | def get_extra_links(self, operator, dttm):
"""
Get link to qubole command result page.
:param operator: operator
:param dttm: datetime
:return: url link
"""
conn = BaseHook.get_connection(operator.kwargs['qubole_conn_id'])
if conn and conn.host:
... | airflow/contrib/hooks/qubole_hook.py |
apache/airflow | DagFileProcessor._launch_process | def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
def helper():
log = logging.getLogger("airflow.processor")
... | Launch a process to process the given file. | def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Launch a process to process the given file.
:param result_queue: the qu... | airflow/jobs.py |
apache/airflow | DagFileProcessor.start | def start(self):
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies)
self._start_time = tim... | Launch the process and start processing the DAG. | def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".for... | airflow/jobs.py |
apache/airflow | DagFileProcessor.done | def done(self):
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._result_queue and not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
... | Check if the process launched to process this file is done. | def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._... | airflow/jobs.py |
apache/airflow | SchedulerJob._exit_gracefully | def _exit_gracefully(self, signum, frame):
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK) | Helper method to clean up processor_agent to avoid leaving orphan processes. | def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os... | airflow/jobs.py |
apache/airflow | SchedulerJob._process_task_instances | def _process_task_instances(self, dag, queue, session=None):
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
if run.execution_date > ... | This method schedules the tasks for a single DAG by looking at the active DAG runs and adding task instances that should run to the queue. | def _process_task_instances(self, dag, queue, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
da... | airflow/jobs.py |
apache/airflow | SchedulerJob.__get_concurrency_maps | def __get_concurrency_maps(self, states, session=None):
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map ... | Get the concurrency maps. | def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) t... | airflow/jobs.py |
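`__get_concurrency_maps` reduces to one grouped count query turned into two lookup tables: running task instances per DAG and per (dag_id, task_id) pair. A small sketch of that aggregation over plain tuples standing in for the query's result rows:

```python
from collections import defaultdict

# Rows as they might come back from a grouped count query:
# (task_id, dag_id, count)
rows = [
    ("extract", "etl_dag", 3),
    ("load", "etl_dag", 1),
    ("train", "ml_dag", 2),
]

dag_map = defaultdict(int)    # dag_id -> number of task instances
task_map = defaultdict(int)   # (dag_id, task_id) -> number of task instances
for task_id, dag_id, count in rows:
    dag_map[dag_id] += count
    task_map[(dag_id, task_id)] += count

print(dict(dag_map))   # {'etl_dag': 4, 'ml_dag': 2}
print(dict(task_map))  # {('etl_dag', 'extract'): 3, ('etl_dag', 'load'): 1, ('ml_dag', 'train'): 2}
```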
apache/airflow | SchedulerJob._change_state_for_executable_task_instances | def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
... | Changes the state of task instances in the list with one of the given states to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format. | def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in Simple... | airflow/jobs.py |
apache/airflow | SchedulerJob._enqueue_task_instances_with_queued_state | def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
TI = models.TaskInstance
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
... | Takes task_instances, which should have been set to queued, and enqueues them with the executor. | def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances ... | airflow/jobs.py |
apache/airflow | SchedulerJob._execute_task_instances | def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
ses... | Attempts to execute TaskInstances that should be executed by the scheduler. There are three steps. | def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by ... | airflow/jobs.py |
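The docstring lists three steps: find executable task instances, flip them to QUEUED, then hand them to the executor. A schematic sketch of that pipeline with hypothetical helpers; none of these names or signatures are Airflow's:

```python
# Schematic only: each helper below is a hypothetical stand-in for one of
# the three steps named in the docstring.

def find_executable_task_instances(states):
    """Step 1: pick task instances eligible to run, respecting limits."""
    return [("etl_dag", "extract", "2024-01-01")]


def change_state_to_queued(task_instances):
    """Step 2: atomically mark the chosen task instances as QUEUED."""
    return task_instances


def enqueue_with_executor(task_instances):
    """Step 3: send the queued task instances to the executor."""
    for ti in task_instances:
        print("queued", ti)


def execute_task_instances(states):
    executable = find_executable_task_instances(states)
    queued = change_state_to_queued(executable)
    enqueue_with_executor(queued)
    return len(queued)


execute_task_instances(states=["scheduled"])
```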
apache/airflow | SchedulerJob._change_state_for_tasks_failed_to_execute | def _change_state_for_tasks_failed_to_execute(self, session):
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution... | If there are tasks left over in the executor, we set them back to SCHEDULED to avoid creating hanging tasks. | def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = m... | airflow/jobs.py |
apache/airflow | SchedulerJob._process_executor_events | def _process_executor_events(self, simple_dag_bag, session=None):
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.... | Respond to executor events. | def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids... | airflow/jobs.py |
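`_process_executor_events` walks the executor's event buffer, whose keys identify a task instance attempt and whose values are the states the executor reports. A toy sketch of consuming such a buffer, with a plain dict standing in for the executor:

```python
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("scheduler.events")

# Hypothetical event buffer:
# (dag_id, task_id, execution_date, try_number) -> reported state
event_buffer = {
    ("etl_dag", "extract", "2024-01-01", 1): "success",
    ("etl_dag", "load", "2024-01-01", 2): "failed",
}

for key, state in list(event_buffer.items()):
    dag_id, task_id, execution_date, try_number = key
    log.info(
        "Executor reports %s.%s execution_date=%s as %s (try %s)",
        dag_id, task_id, execution_date, state, try_number,
    )
    # The real scheduler would now reconcile the database state of the
    # task instance with what the executor reported.
    del event_buffer[key]
```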
apache/airflow | SchedulerJob.process_file | def process_file(self, file_path, zombies, pickle_dags=False, session=None):
self.log.info("Processing file %s for tasks to queue", file_path)
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exce... | Process a Python file containing Airflow DAGs. This includes executing the file, pickling the DAGs, and creating task instances for tasks that should run. | def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For e... | airflow/jobs.py |
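Step 1 of `process_file` is to execute the file and look for DAG objects in its namespace (in Airflow this is handled by `models.DagBag`). A generic sketch of that execute-and-scan pattern using `runpy` and a placeholder class, not the real DagBag machinery:

```python
import runpy


class DAG:
    """Placeholder for the object type being scanned for; in Airflow the
    scanned files would define instances of airflow's own DAG class."""
    def __init__(self, dag_id):
        self.dag_id = dag_id


def collect_dags(file_path: str):
    # Execute the file in a fresh namespace and keep anything that is a DAG.
    namespace = runpy.run_path(file_path)
    return [obj for obj in namespace.values() if isinstance(obj, DAG)]
```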
apache/airflow | BackfillJob._update_counters | def _update_counters(self, ti_status):
for key, ti in list(ti_status.running.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.... | Updates the counters per state of the tasks that were running. Can re-add tasks to the to-run list if required. | def _update_counters(self, ti_status):
"""
Updates the counters per state of the tasks that were running. Can re-add
to tasks to run in case required.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus
"""
... | airflow/jobs.py |
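`_update_counters` moves keys out of the running map into per-state buckets after refreshing each task instance. A compact sketch of that bookkeeping over plain containers; the state strings and the `TiStatus` holder are simplified stand-ins for Airflow's `State` values and `_DagRunTaskStatus`:

```python
from dataclasses import dataclass, field


@dataclass
class TiStatus:
    running: dict = field(default_factory=dict)   # key -> current state
    succeeded: set = field(default_factory=set)
    failed: set = field(default_factory=set)
    to_run: dict = field(default_factory=dict)    # key -> needs another attempt


def update_counters(ti_status: TiStatus):
    for key, state in list(ti_status.running.items()):
        if state == "success":
            ti_status.succeeded.add(key)
            ti_status.running.pop(key)
        elif state == "failed":
            ti_status.failed.add(key)
            ti_status.running.pop(key)
        elif state == "up_for_retry":
            # Re-add to the to-run map so the backfill picks it up again.
            ti_status.to_run[key] = state
            ti_status.running.pop(key)


status = TiStatus(running={"t1": "success", "t2": "failed", "t3": "up_for_retry"})
update_counters(status)
print(status.succeeded, status.failed, list(status.to_run))
```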
apache/airflow | BackfillJob._manage_executor_state | def _manage_executor_state(self, running):
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
... | Checks if the executor agrees with the state of task instances that are running | def _manage_executor_state(self, running):
"""
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
... | airflow/jobs.py |
apache/airflow | BackfillJob._execute_for_run_dates | def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
for next_run_date in run_dates:
dag_run = self._get_dag_run(next_run_date, session=session)
tis_map = self._task_instances_for_dag_run(dag_run,
... | Computes the dag runs and their respective task instances for the given run dates and executes the task instances. | def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
"""
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates o... | airflow/jobs.py |