| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q266100 | BigtableHook.update_cluster | test | def update_cluster(instance, cluster_id, nodes):
"""
Updates number of nodes in the specified Cloud Bigtable cluster.
Raises google.api_core.exceptions.NotFound if the cluster does not exist.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the cluster.
:type cluster_id: str
:param cluster_id: The ID of the cluster.
:type nodes: int
:param nodes: The desired number of nodes.
"""
cluster = Cluster(cluster_id, instance)
cluster.serve_nodes = nodes
cluster.update() | python | {
"resource": ""
} |
q266101 | HiveCliHook._prepare_cli_cmd | test | def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
host=conn.host, port=conn.port, schema=conn.schema)
if configuration.conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}".format(
template=template, proxy_user=proxy_user)
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list | python | {
"resource": ""
} |
q266102 | HiveCliHook._prepare_hiveconf | test | def _prepare_hiveconf(d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d: dict of hiveconf parameter names and values
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
) | python | {
"resource": ""
} |
q266103 | HiveCliHook.load_df | test | def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs) | python | {
"resource": ""
} |
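A minimal usage sketch for load_df; the connection id and table name are illustrative assumptions, not part of the source above.

import pandas as pd
from airflow.hooks.hive_hooks import HiveCliHook

# Hive column types are inferred from the DataFrame dtypes when field_dict is omitted.
df = pd.DataFrame({"emp_name": ["alice", "bob"], "salary": [100, 200]})

hook = HiveCliHook(hive_cli_conn_id="hive_cli_default")  # assumed connection id
# Writes the frame to a temporary CSV, then delegates to load_file.
hook.load_df(df, table="staging.employees", create=True, recreate=True)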
q266104 | HiveCliHook.load_file | test | def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n".format(table=table)
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n".format(
table=table, fields=fields)
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n".format(pfields=pfields)
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n".format(delimiter=delimiter)
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n".format(tprops=tprops)
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' ".format(filepath=filepath)
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} ".format(table=table)
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals})".format(pvals=pvals)
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ';\n'
self.log.info(hql)
self.run_cli(hql) | python | {
"resource": ""
} |
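A sketch of loading an existing CSV with an explicit schema, partition, and table properties; the file path, table, and connection id are illustrative assumptions.

from collections import OrderedDict
from airflow.hooks.hive_hooks import HiveCliHook

hook = HiveCliHook(hive_cli_conn_id="hive_cli_default")  # assumed connection id
hook.load_file(
    filepath="/tmp/babynames.csv",
    table="airflow.static_babynames_partitioned",
    field_dict=OrderedDict([("state", "STRING"), ("name", "STRING"), ("num", "INT")]),
    partition={"ds": "2019-01-01"},          # rendered as PARTITION (ds='2019-01-01')
    recreate=True,
    tblproperties={"comment": "loaded by HiveCliHook"},
)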
q266105 | HiveMetastoreHook.get_metastore_client | test | def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
import hmsclient
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.conf.get('core', 'security') == 'kerberos' \
and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol) | python | {
"resource": ""
} |
q266106 | HiveMetastoreHook.check_for_named_partition | test | def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition_name: Name of the partition to check for (eg `a=b/c=d`)
:type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name) | python | {
"resource": ""
} |
q266107 | HiveMetastoreHook.table_exists | test | def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False | python | {
"resource": ""
} |
q266108 | HiveServer2Hook.get_conn | test | def get_conn(self, schema=None):
"""
Returns a Hive connection object.
"""
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
if auth_mechanism == 'NONE' and db.login is None:
# we need to give a username
username = 'airflow'
kerberos_service_name = None
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# pyhive uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism "
"for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'KERBEROS'
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=db.password,
database=schema or db.schema or 'default') | python | {
"resource": ""
} |
q266109 | HiveServer2Hook.get_results | test | def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results | python | {
"resource": ""
} |
q266110 | HiveServer2Hook.to_csv | test | def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
:param schema: target schema, default to 'default'.
:type schema: str
:param delimiter: delimiter of the csv file, default to ','.
:type delimiter: str
:param lineterminator: lineterminator of the csv file.
:type lineterminator: str
:param output_header: whether to write the column header to the csv file, default to True.
:type output_header: bool
:param fetch_size: number of result rows fetched and written per batch, default to 1000.
:type fetch_size: int
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i) | python | {
"resource": ""
} |
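A sketch exporting a query result to CSV with the method above; the connection id, query, and output path are illustrative assumptions.

from airflow.hooks.hive_hooks import HiveServer2Hook

hook = HiveServer2Hook(hiveserver2_conn_id="hiveserver2_default")  # assumed connection id
hook.to_csv(
    hql="SELECT state, name, num FROM airflow.static_babynames LIMIT 1000",
    csv_filepath="/tmp/babynames_export.csv",
    schema="airflow",
    output_header=True,
    fetch_size=500,        # rows are fetched and written in batches of this size
)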
q266111 | HiveServer2Hook.get_records | test | def get_records(self, hql, schema='default', hive_conf=None):
"""
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data'] | python | {
"resource": ""
} |
q266112 | HiveServer2Hook.get_pandas_df | test | def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:return: result of hql execution
:rtype: DataFrame
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
:return: pandas.DataFrame
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df | python | {
"resource": ""
} |
q266113 | CloudVisionHook.get_conn | test | def get_conn(self):
"""
Retrieves connection to Cloud Vision.
:return: Google Cloud Vision client object.
:rtype: google.cloud.vision_v1.ProductSearchClient
"""
if not self._client:
self._client = ProductSearchClient(credentials=self._get_credentials())
return self._client | python | {
"resource": ""
} |
q266114 | DingdingHook._get_endpoint | test | def _get_endpoint(self):
"""
Get Dingding endpoint for sending message.
"""
conn = self.get_connection(self.http_conn_id)
token = conn.password
if not token:
raise AirflowException('Dingding token is required but none was found; '
'check your conn_id configuration.')
return 'robot/send?access_token={}'.format(token) | python | {
"resource": ""
} |
q266115 | DingdingHook.send | test | def send(self):
"""
Send Dingding message
"""
support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard']
if self.message_type not in support_type:
raise ValueError('DingdingWebhookHook only supports {} '
'so far, but received {}'.format(support_type, self.message_type))
data = self._build_message()
self.log.info('Sending Dingding type %s message %s', self.message_type, data)
resp = self.run(endpoint=self._get_endpoint(),
data=data,
headers={'Content-Type': 'application/json'})
# A successful Dingding send returns errcode equal to 0
if int(resp.json().get('errcode')) != 0:
raise AirflowException('Sending Dingding message failed, received error '
'message {}'.format(resp.text))
self.log.info('Dingding message sent successfully')
"resource": ""
} |
q266116 | _bind_parameters | test | def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters | python | {
"resource": ""
} |
q266117 | _escape | test | def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e | python | {
"resource": ""
} |
q266118 | _bq_cast | test | def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
return float(string_field)
elif bq_type == 'BOOLEAN':
if string_field not in ['true', 'false']:
raise ValueError("{} must have value 'true' or 'false'".format(
string_field))
return string_field == 'true'
else:
return string_field | python | {
"resource": ""
} |
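The three module-level helpers above can be exercised directly; the import path below assumes they live in the contrib BigQuery hook module, and the query is illustrative.

from airflow.contrib.hooks.bigquery_hook import _bind_parameters, _bq_cast  # assumed module path

sql = _bind_parameters(
    "SELECT * FROM users WHERE name = %(name)s AND age > %(age)s",
    {"name": "O'Brien", "age": 30})
# -> "SELECT * FROM users WHERE name = 'O\'Brien' AND age > 30" (string escaped, int stringified)

_bq_cast("42", "INTEGER")     # -> 42
_bq_cast("true", "BOOLEAN")   # -> True
_bq_cast(None, "TIMESTAMP")   # -> None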
q266119 | _validate_value | test | def _validate_value(key, value, expected_type):
""" function to check expected type and raise
error if type is not correct """
if not isinstance(value, expected_type):
raise TypeError("{} argument must have a type {} not {}".format(
key, expected_type, type(value))) | python | {
"resource": ""
} |
q266120 | BigQueryHook.get_conn | test | def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(
service=service,
project_id=project,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
num_retries=self.num_retries
) | python | {
"resource": ""
} |
q266121 | BigQueryHook.get_service | test | def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False) | python | {
"resource": ""
} |
q266122 | BigQueryHook.table_exists | test | def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id, datasetId=dataset_id,
tableId=table_id).execute(num_retries=self.num_retries)
return True
except HttpError as e:
if e.resp['status'] == '404':
return False
raise | python | {
"resource": ""
} |
q266123 | BigQueryBaseCursor.create_empty_table | test | def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning=None,
cluster_fields=None,
labels=None,
view=None,
num_retries=None):
"""
Creates a new, empty table in the dataset.
To create a view, which is defined by a SQL query, pass a dictionary as the 'view' kwarg.
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
:type schema_fields: list
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:param cluster_fields: [Optional] The fields used for clustering.
Must be specified with time_partitioning, data in the table will be first
partitioned and subsequently clustered.
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
:type cluster_fields: list
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
:type view: dict
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000",
"useLegacySql": False
}
:return: None
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {
'tableReference': {
'tableId': table_id
}
}
if schema_fields:
table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if cluster_fields:
table_resource['clustering'] = {
'fields': cluster_fields
}
if labels:
table_resource['labels'] = labels
if view:
table_resource['view'] = view
num_retries = num_retries if num_retries else self.num_retries
self.log.info('Creating Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute(num_retries=num_retries)
self.log.info('Table created successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
) | python | {
"resource": ""
} |
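A sketch of creating a partitioned, clustered table through the hook's PEP 249 connection; the project, dataset, and schema below are illustrative assumptions.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

hook = BigQueryHook(bigquery_conn_id="bigquery_default")  # assumed connection id
cursor = hook.get_conn().cursor()                         # exposes the BigQueryBaseCursor methods
cursor.create_empty_table(
    project_id="my-project",
    dataset_id="analytics",
    table_id="events",
    schema_fields=[{"name": "event_ts", "type": "TIMESTAMP", "mode": "REQUIRED"},
                   {"name": "user_id", "type": "STRING", "mode": "NULLABLE"}],
    time_partitioning={"type": "DAY", "field": "event_ts"},
    cluster_fields=["user_id"],
)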
q266124 | BigQueryBaseCursor.patch_table | test | def patch_table(self,
dataset_id,
table_id,
project_id=None,
description=None,
expiration_time=None,
external_data_configuration=None,
friendly_name=None,
labels=None,
schema=None,
time_partitioning=None,
view=None,
require_partition_filter=None):
"""
Patch information in an existing table.
It only updates fields that are provided in the request object.
Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch
:param dataset_id: The dataset containing the table to be patched.
:type dataset_id: str
:param table_id: The Name of the table to be patched.
:type table_id: str
:param project_id: The project containing the table to be patched.
:type project_id: str
:param description: [Optional] A user-friendly description of this table.
:type description: str
:param expiration_time: [Optional] The time when this table expires,
in milliseconds since the epoch.
:type expiration_time: int
:param external_data_configuration: [Optional] A dictionary containing
properties of a table stored outside of BigQuery.
:type external_data_configuration: dict
:param friendly_name: [Optional] A descriptive name for this table.
:type friendly_name: str
:param labels: [Optional] A dictionary containing labels associated with this table.
:type labels: dict
:param schema: [Optional] If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
The supported schema modifications and unsupported schema modification are listed here:
https://cloud.google.com/bigquery/docs/managing-table-schemas
**Example**: ::
schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema: list
:param time_partitioning: [Optional] A dictionary containing time-based partitioning
definition for the table.
:type time_partitioning: dict
:param view: [Optional] A dictionary containing definition for the view.
If set, it will patch a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
"useLegacySql": False
}
:type view: dict
:param require_partition_filter: [Optional] If true, queries over this table require a
partition filter. If false, queries over the table may omit the partition filter.
:type require_partition_filter: bool
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {}
if description is not None:
table_resource['description'] = description
if expiration_time is not None:
table_resource['expirationTime'] = expiration_time
if external_data_configuration:
table_resource['externalDataConfiguration'] = external_data_configuration
if friendly_name is not None:
table_resource['friendlyName'] = friendly_name
if labels:
table_resource['labels'] = labels
if schema:
table_resource['schema'] = {'fields': schema}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if view:
table_resource['view'] = view
if require_partition_filter is not None:
table_resource['requirePartitionFilter'] = require_partition_filter
self.log.info('Patching Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().patch(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute(num_retries=self.num_retries)
self.log.info('Table patched successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
) | python | {
"resource": ""
} |
q266125 | BigQueryBaseCursor.cancel_query | test | def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id,
self.running_job_id)
if self.location:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id,
location=self.location).execute(num_retries=self.num_retries)
else:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id).execute(num_retries=self.num_retries)
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while polling_attempts < max_polling_attempts and not job_complete:
polling_attempts = polling_attempts + 1
job_complete = self.poll_job_complete(self.running_job_id)
if job_complete:
self.log.info('Job successfully canceled: %s, %s',
self.project_id, self.running_job_id)
elif polling_attempts == max_polling_attempts:
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
self.running_job_id)
else:
self.log.info('Waiting for canceled job with id %s to finish.',
self.running_job_id)
time.sleep(5) | python | {
"resource": ""
} |
q266126 | BigQueryBaseCursor.run_table_delete | test | def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: bool
:return:
"""
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute(num_retries=self.num_retries)
self.log.info('Deleted table %s:%s.%s.', deletion_project,
deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception('Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.') | python | {
"resource": ""
} |
q266127 | BigQueryBaseCursor.run_table_upsert | test | def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
Creates a new, empty table in the dataset;
if the table already exists, updates the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(
projectId=project_id, datasetId=dataset_id).execute(num_retries=self.num_retries)
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
self.log.info('Table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute(num_retries=self.num_retries)
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute(num_retries=self.num_retries)
# If there is no next page, then the table doesn't exist.
else:
# do insert
self.log.info('Table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute(num_retries=self.num_retries) | python | {
"resource": ""
} |
q266128 | BigQueryBaseCursor.run_grant_dataset_view_access | test | def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project=None,
view_project=None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(
projectId=source_project, datasetId=source_dataset).execute(num_retries=self.num_retries)
access = source_dataset_resource[
'access'] if 'access' in source_dataset_resource else []
view_access = {
'view': {
'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table
}
}
# check to see if the view we want to add already exists.
if view_access not in access:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
access.append(view_access)
return self.service.datasets().patch(
projectId=source_project,
datasetId=source_dataset,
body={
'access': access
}).execute(num_retries=self.num_retries)
else:
# if view is already in access, do nothing.
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project, source_dataset)
return source_dataset_resource | python | {
"resource": ""
} |
q266129 | BigQueryBaseCursor.get_dataset | test | def get_dataset(self, dataset_id, project_id=None):
"""
Method returns the dataset_resource if the dataset exists
and raises a 404 error if it does not exist
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param project_id: The GCP Project ID
:type project_id: str
:return: dataset_resource
.. seealso::
For more information, see Dataset Resource content:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
"""
if not dataset_id or not isinstance(dataset_id, str):
raise ValueError("dataset_id argument must be provided and has "
"a type 'str'. You provided: {}".format(dataset_id))
dataset_project_id = project_id if project_id else self.project_id
try:
dataset_resource = self.service.datasets().get(
datasetId=dataset_id, projectId=dataset_project_id).execute(num_retries=self.num_retries)
self.log.info("Dataset Resource: %s", dataset_resource)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content))
return dataset_resource | python | {
"resource": ""
} |
q266130 | BigQueryBaseCursor.get_datasets_list | test | def get_datasets_list(self, project_id=None):
"""
Method returns full list of BigQuery datasets in the current project
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud Project for which you
try to get all datasets
:type project_id: str
:return: datasets_list
Example of returned datasets_list: ::
[
{
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_2_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_2_test"
}
},
{
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_1_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_1_test"
}
}
]
"""
dataset_project_id = project_id if project_id else self.project_id
try:
datasets_list = self.service.datasets().list(
projectId=dataset_project_id).execute(num_retries=self.num_retries)['datasets']
self.log.info("Datasets List: %s", datasets_list)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content))
return datasets_list | python | {
"resource": ""
} |
q266131 | BigQueryBaseCursor.insert_all | test | def insert_all(self, project_id, dataset_id, table_id,
rows, ignore_unknown_values=False,
skip_invalid_rows=False, fail_on_error=False):
"""
Method to stream data into BigQuery one record at a time without needing
to run a load job
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
:param project_id: The name of the project where we have the table
:type project_id: str
:param dataset_id: The name of the dataset where we have the table
:type dataset_id: str
:param table_id: The name of the table
:type table_id: str
:param rows: the rows to insert
:type rows: list
**Example of rows**:
rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}]
:param ignore_unknown_values: [Optional] Accept rows that contain values
that do not match the schema. The unknown values are ignored.
The default value is false, which treats unknown values as errors.
:type ignore_unknown_values: bool
:param skip_invalid_rows: [Optional] Insert all valid rows of a request,
even if invalid rows exist. The default value is false, which causes
the entire request to fail if any invalid rows exist.
:type skip_invalid_rows: bool
:param fail_on_error: [Optional] Force the task to fail if any errors occur.
The default value is false, which indicates the task should not fail
even if any insertion errors occur.
:type fail_on_error: bool
"""
dataset_project_id = project_id if project_id else self.project_id
body = {
"rows": rows,
"ignoreUnknownValues": ignore_unknown_values,
"kind": "bigquery#tableDataInsertAllRequest",
"skipInvalidRows": skip_invalid_rows,
}
try:
self.log.info(
'Inserting %s row(s) into Table %s:%s.%s',
len(rows), dataset_project_id, dataset_id, table_id
)
resp = self.service.tabledata().insertAll(
projectId=dataset_project_id, datasetId=dataset_id,
tableId=table_id, body=body
).execute(num_retries=self.num_retries)
if 'insertErrors' not in resp:
self.log.info(
'All row(s) inserted successfully: %s:%s.%s',
dataset_project_id, dataset_id, table_id
)
else:
error_msg = '{} insert error(s) occurred: {}:{}.{}. Details: {}'.format(
len(resp['insertErrors']),
dataset_project_id, dataset_id, table_id, resp['insertErrors'])
if fail_on_error:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(error_msg)
)
self.log.info(error_msg)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
) | python | {
"resource": ""
} |
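A streaming-insert sketch using the method above; the connection id, project, dataset, and rows are illustrative assumptions.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id="bigquery_default").get_conn().cursor()
cursor.insert_all(
    project_id="my-project",
    dataset_id="analytics",
    table_id="events",
    rows=[{"json": {"user_id": "u1", "event_ts": "2019-01-01 00:00:00"}},
          {"json": {"user_id": "u2", "event_ts": "2019-01-01 00:05:00"}}],
    skip_invalid_rows=True,
    fail_on_error=True,   # raise AirflowException if any insertErrors come back
)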
q266132 | BigQueryCursor.execute | test | def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: str
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
sql = _bind_parameters(operation,
parameters) if parameters else operation
self.job_id = self.run_query(sql) | python | {
"resource": ""
} |
q266133 | BigQueryCursor.executemany | test | def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: str
:param seq_of_parameters: List of dictionary parameters to substitute into the
query.
:type seq_of_parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters) | python | {
"resource": ""
} |
q266134 | BigQueryCursor.next | test | def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (self.service.jobs().getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token).execute(num_retries=self.num_retries))
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
self.page_token = None
return None
return self.buffer.pop(0) | python | {
"resource": ""
} |
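A PEP 249-style query sketch combining execute, client-side parameter binding, and the buffered fetching implemented by next(); the connection id and table are illustrative assumptions.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id="bigquery_default",
                      use_legacy_sql=False).get_conn().cursor()
# Parameters are substituted by _bind_parameters before the job is submitted.
cursor.execute("SELECT name, age FROM `my-project.analytics.users` WHERE age > %(age)s",
               {"age": 30})
row = cursor.fetchone()       # pops rows from the buffer, paging as needed
while row is not None:
    print(row)
    row = cursor.fetchone()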
q266135 | PostgresToGoogleCloudStorageOperator._query_postgres | test | def _query_postgres(self):
"""
Queries Postgres and returns a cursor to the results.
"""
postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id)
conn = postgres.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql, self.parameters)
return cursor | python | {
"resource": ""
} |
q266136 | _make_intermediate_dirs | test | def _make_intermediate_dirs(sftp_client, remote_directory):
"""
Create all the intermediate directories in a remote host
:param sftp_client: A Paramiko SFTP client.
:param remote_directory: Absolute Path of the directory containing the file
:return:
"""
if remote_directory == '/':
sftp_client.chdir('/')
return
if remote_directory == '':
return
try:
sftp_client.chdir(remote_directory)
except IOError:
dirname, basename = os.path.split(remote_directory.rstrip('/'))
_make_intermediate_dirs(sftp_client, dirname)
sftp_client.mkdir(basename)
sftp_client.chdir(basename)
return | python | {
"resource": ""
} |
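A sketch of how the recursive helper might be used with a Paramiko SFTP client before an upload; the import path, host, credentials, and paths are illustrative assumptions.

import paramiko
from airflow.contrib.operators.sftp_operator import _make_intermediate_dirs  # assumed module path

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect("sftp.example.com", username="airflow", password="secret")
sftp_client = ssh.open_sftp()

# Create /data/incoming/2019 one level at a time, then upload into it.
_make_intermediate_dirs(sftp_client, "/data/incoming/2019")
sftp_client.put("/tmp/report.csv", "/data/incoming/2019/report.csv")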
q266137 | SQSHook.create_queue | test | def create_queue(self, queue_name, attributes=None):
"""
Create queue using connection object
:param queue_name: name of the queue.
:type queue_name: str
:param attributes: additional attributes for the queue (default: None)
For details of the attributes parameter see :py:meth:`botocore.client.SQS.create_queue`
:type attributes: dict
:return: dict with the information about the queue
For details of the returned value see :py:meth:`botocore.client.SQS.create_queue`
:rtype: dict
"""
return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {}) | python | {
"resource": ""
} |
q266138 | SQSHook.send_message | test | def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None):
"""
Send message to the queue
:param queue_url: queue url
:type queue_url: str
:param message_body: the contents of the message
:type message_body: str
:param delay_seconds: seconds to delay the message
:type delay_seconds: int
:param message_attributes: additional attributes for the message (default: None)
For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message`
:type message_attributes: dict
:return: dict with the information about the message sent
For details of the returned value see :py:meth:`botocore.client.SQS.send_message`
:rtype: dict
"""
return self.get_conn().send_message(QueueUrl=queue_url,
MessageBody=message_body,
DelaySeconds=delay_seconds,
MessageAttributes=message_attributes or {}) | python | {
"resource": ""
} |
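A sketch combining the two SQS helpers above; the queue name, connection id, and import path are illustrative assumptions.

from airflow.contrib.hooks.aws_sqs_hook import SQSHook  # assumed module path

hook = SQSHook(aws_conn_id="aws_default")
queue = hook.create_queue("airflow-events")      # boto3 response containing 'QueueUrl'
hook.send_message(
    queue_url=queue["QueueUrl"],
    message_body="hello from airflow",
    delay_seconds=0,
    message_attributes={"source": {"StringValue": "demo", "DataType": "String"}},
)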
q266139 | BaseTaskRunner.run_command | test | def run_command(self, run_with=None, join_args=False):
"""
Run the task command.
:param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
:type run_with: list
:param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs
``['airflow run']``
:type join_args: bool
:return: the process that was run
:rtype: subprocess.Popen
"""
run_with = run_with or []
cmd = [" ".join(self._command)] if join_args else self._command
full_cmd = run_with + cmd
self.log.info('Running: %s', full_cmd)
proc = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
close_fds=True,
env=os.environ.copy(),
preexec_fn=os.setsid
)
# Start daemon thread to read subprocess logging output
log_reader = threading.Thread(
target=self._read_task_logs,
args=(proc.stdout,),
)
log_reader.daemon = True
log_reader.start()
return proc | python | {
"resource": ""
} |
q266140 | BaseTaskRunner.on_finish | test | def on_finish(self):
"""
A callback that should be called when this is done running.
"""
if self._cfg_path and os.path.isfile(self._cfg_path):
if self.run_as_user:
subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
else:
os.remove(self._cfg_path) | python | {
"resource": ""
} |
q266141 | _main | test | def _main():
"""
Parse options and process commands
"""
# Parse arguments
usage = "usage: nvd3.py [options]"
parser = OptionParser(usage=usage,
version=("python-nvd3 - Charts generator with "
"nvd3.js and d3.js"))
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print messages to stdout")
(options, args) = parser.parse_args() | python | {
"resource": ""
} |
q266142 | NVD3Chart.buildhtmlheader | test | def buildhtmlheader(self):
"""generate HTML header content"""
self.htmlheader = ''
# If the JavaScript assets have already been injected, don't bother re-sourcing them.
global _js_initialized
if '_js_initialized' not in globals() or not _js_initialized:
for css in self.header_css:
self.htmlheader += css
for js in self.header_js:
self.htmlheader += js | python | {
"resource": ""
} |
q266143 | NVD3Chart.buildcontainer | test | def buildcontainer(self):
"""generate HTML div"""
if self.container:
return
# Create SVG div with style
if self.width:
if self.width[-1] != '%':
self.style += 'width:%spx;' % self.width
else:
self.style += 'width:%s;' % self.width
if self.height:
if self.height[-1] != '%':
self.style += 'height:%spx;' % self.height
else:
self.style += 'height:%s;' % self.height
if self.style:
self.style = 'style="%s"' % self.style
self.container = self.containerheader + \
'<div id="%s"><svg %s></svg></div>\n' % (self.name, self.style) | python | {
"resource": ""
} |
q266144 | NVD3Chart.buildjschart | test | def buildjschart(self):
"""generate javascript code for the chart"""
self.jschart = ''
# add custom tooltip string in jschart
# default condition (if build_custom_tooltip is not called explicitly with date_flag=True)
if self.tooltip_condition_string == '':
self.tooltip_condition_string = 'var y = String(graph.point.y);\n'
# Include data
self.series_js = json.dumps(self.series) | python | {
"resource": ""
} |
q266145 | NVD3Chart.create_x_axis | test | def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False):
"""Create X-axis"""
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
if format == 'AM_PM':
axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }"
else:
axis['tickFormat'] = "d3.format(',%s')" % format
if label:
axis['axisLabel'] = "'" + label + "'"
# date format : see https://github.com/mbostock/d3/wiki/Time-Formatting
if date:
self.dateformat = format
axis['tickFormat'] = ("function(d) { return d3.time.format('%s')"
"(new Date(parseInt(d))) }\n"
"" % self.dateformat)
# flag is the x Axis is a date
if name[0] == 'x':
self.x_axis_date = True
# Add new axis to list of axis
self.axislist[name] = axis
# Create x2Axis if focus_enable
if name == "xAxis" and self.focus_enable:
self.axislist['x2Axis'] = axis | python | {
"resource": ""
} |
q266146 | NVD3Chart.create_y_axis | test | def create_y_axis(self, name, label=None, format=None, custom_format=False):
"""
Create Y-axis
"""
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
axis['tickFormat'] = "d3.format(',%s')" % format
if label:
axis['axisLabel'] = "'" + label + "'"
# Add new axis to list of axis
self.axislist[name] = axis | python | {
"resource": ""
} |
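A sketch of configuring both axes on a chart instance; lineChart is assumed to be one of the library's NVD3Chart subclasses, and the constructor keywords are assumptions rather than part of the code above.

from nvd3 import lineChart

chart = lineChart(name="trafficChart", x_is_date=True, width=800, height=400)
# Tick formats follow d3 conventions: a date format for x, a numeric format for y.
chart.create_x_axis("xAxis", label="day", format="%d %b %Y", date=True)
chart.create_y_axis("yAxis", label="requests", format=".0f")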
q266147 | SqliteHook.get_conn | test | def get_conn(self):
"""
Returns a sqlite connection object
"""
conn = self.get_connection(self.sqlite_conn_id)
conn = sqlite3.connect(conn.host)
return conn | python | {
"resource": ""
} |
q266148 | action_logging | test | def action_logging(f):
"""
Decorator to log user actions
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
with create_session() as session:
if g.user.is_anonymous:
user = 'anonymous'
else:
user = g.user.username
log = Log(
event=f.__name__,
task_instance=None,
owner=user,
extra=str(list(request.args.items())),
task_id=request.args.get('task_id'),
dag_id=request.args.get('dag_id'))
if 'execution_date' in request.args:
log.execution_date = pendulum.parse(
request.args.get('execution_date'))
session.add(log)
return f(*args, **kwargs)
return wrapper | python | {
"resource": ""
} |
q266149 | gzipped | test | def gzipped(f):
"""
Decorator to make a view compressed
"""
@functools.wraps(f)
def view_func(*args, **kwargs):
@after_this_request
def zipper(response):
accept_encoding = request.headers.get('Accept-Encoding', '')
if 'gzip' not in accept_encoding.lower():
return response
response.direct_passthrough = False
if (response.status_code < 200 or response.status_code >= 300 or
'Content-Encoding' in response.headers):
return response
gzip_buffer = IO()
gzip_file = gzip.GzipFile(mode='wb',
fileobj=gzip_buffer)
gzip_file.write(response.data)
gzip_file.close()
response.data = gzip_buffer.getvalue()
response.headers['Content-Encoding'] = 'gzip'
response.headers['Vary'] = 'Accept-Encoding'
response.headers['Content-Length'] = len(response.data)
return response
return f(*args, **kwargs)
return view_func | python | {
"resource": ""
} |
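A sketch of applying the decorator to a Flask view so large responses are compressed when the client sends an Accept-Encoding: gzip header; the import path and route are illustrative assumptions.

from flask import Flask, jsonify
from airflow.www.decorators import gzipped  # assumed module path

app = Flask(__name__)

@app.route("/big-report")
@gzipped   # defined above; gzips the body when the client and response allow it
def big_report():
    return jsonify({"rows": list(range(10000))})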
q266150 | get_last_dagrun | test | def get_last_dagrun(dag_id, session, include_externally_triggered=False):
"""
Returns the last dag run for a dag, None if there was none.
Last dag run can be any type of run eg. scheduled or backfilled.
Overridden DagRuns are ignored.
"""
DR = DagRun
query = session.query(DR).filter(DR.dag_id == dag_id)
if not include_externally_triggered:
query = query.filter(DR.external_trigger == False) # noqa
query = query.order_by(DR.execution_date.desc())
return query.first() | python | {
"resource": ""
} |
q266151 | DagModel.create_dagrun | test | def create_dagrun(self,
run_id,
state,
execution_date,
start_date=None,
external_trigger=False,
conf=None,
session=None):
"""
Creates a dag run from this dag including the tasks associated with this dag.
Returns the dag run.
:param run_id: defines the run id for this dag run
:type run_id: str
:param execution_date: the execution date of this dag run
:type execution_date: datetime.datetime
:param state: the state of the dag run
:type state: airflow.utils.state.State
:param start_date: the date this dag run should be evaluated
:type start_date: datetime.datetime
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
return self.get_dag().create_dagrun(run_id=run_id,
state=state,
execution_date=execution_date,
start_date=start_date,
external_trigger=external_trigger,
conf=conf,
session=session) | python | {
"resource": ""
} |
q266152 | SQSPublishOperator.execute | test | def execute(self, context):
"""
Publish the message to SQS queue
:param context: the context object
:type context: dict
:return: dict with information about the message sent
For details of the returned dict see :py:meth:`botocore.client.SQS.send_message`
:rtype: dict
"""
hook = SQSHook(aws_conn_id=self.aws_conn_id)
result = hook.send_message(queue_url=self.sqs_queue,
message_body=self.message_content,
delay_seconds=self.delay_seconds,
message_attributes=self.message_attributes)
self.log.info('result of send_message is %s', result)
return result | python | {
"resource": ""
} |
q266153 | json_response | test | def json_response(obj):
"""
returns a json response from a json serializable python object
"""
return Response(
response=json.dumps(
obj, indent=4, cls=AirflowJsonEncoder),
status=200,
mimetype="application/json") | python | {
"resource": ""
} |
q266154 | open_maybe_zipped | test | def open_maybe_zipped(f, mode='r'):
"""
Opens the given file. If the path contains a folder with a .zip suffix, then
the folder is treated as a zip archive, opening the file inside the archive.
:return: a file object, as in `open`, or as in `ZipFile.open`.
"""
_, archive, filename = ZIP_REGEX.search(f).groups()
if archive and zipfile.is_zipfile(archive):
return zipfile.ZipFile(archive, mode=mode).open(filename)
else:
return io.open(f, mode=mode) | python | {
"resource": ""
} |
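A sketch showing the plain-file and inside-zip cases; the import path and file paths are illustrative assumptions.

from airflow.www.utils import open_maybe_zipped  # assumed module path

# Regular file on disk: behaves like io.open.
with open_maybe_zipped("/usr/local/airflow/dags/my_dag.py") as f:
    source = f.read()

# File packaged inside a zipped DAG folder: the .zip segment is opened as an archive.
with open_maybe_zipped("/usr/local/airflow/dags/my_dags.zip/my_dag.py") as f:
    source = f.read()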
q266155 | make_cache_key | test | def make_cache_key(*args, **kwargs):
"""
Used by cache to get a unique key per URL
"""
path = request.path
args = str(hash(frozenset(request.args.items())))
return (path + args).encode('ascii', 'ignore') | python | {
"resource": ""
} |
q266156 | CloudVideoIntelligenceHook.get_conn | test | def get_conn(self):
"""
Returns Gcp Video Intelligence Service client
:rtype: google.cloud.videointelligence_v1.VideoIntelligenceServiceClient
"""
if not self._conn:
self._conn = VideoIntelligenceServiceClient(credentials=self._get_credentials())
return self._conn | python | {
"resource": ""
} |
q266157 | CloudVideoIntelligenceHook.annotate_video | test | def annotate_video(
self,
input_uri=None,
input_content=None,
features=None,
video_context=None,
output_uri=None,
location=None,
retry=None,
timeout=None,
metadata=None,
):
"""
Performs video annotation.
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:type input_uri: str
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:type input_content: bytes
:param features: Requested video annotation features.
:type features: list[google.cloud.videointelligence_v1.VideoIntelligenceServiceClient.enums.Feature]
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently,
only Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:type output_uri: str
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:type video_context: dict or google.cloud.videointelligence_v1.types.VideoContext
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1.
If no region is specified, a region will be determined based on video file location.
:type location: str
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Optional, Additional metadata that is provided to the method.
:type metadata: seq[tuple[str, str]]
"""
client = self.get_conn()
return client.annotate_video(
input_uri=input_uri,
input_content=input_content,
features=features,
video_context=video_context,
output_uri=output_uri,
location_id=location,
retry=retry,
timeout=timeout,
metadata=metadata,
) | python | {
"resource": ""
} |
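A sketch requesting label detection for a video in GCS; the feature enum comes from the underlying client library, and the import path, connection id, and bucket paths are illustrative assumptions.

from google.cloud.videointelligence_v1 import enums
from airflow.contrib.hooks.gcp_video_intelligence_hook import CloudVideoIntelligenceHook  # assumed path

hook = CloudVideoIntelligenceHook(gcp_conn_id="google_cloud_default")
operation = hook.annotate_video(
    input_uri="gs://my-bucket/videos/cat.mp4",
    features=[enums.Feature.LABEL_DETECTION],
    output_uri="gs://my-bucket/annotations/cat.json",
)
result = operation.result(timeout=600)   # long-running operation; block until it finishes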
q266158 | OpsgenieAlertHook._get_api_key | test | def _get_api_key(self):
"""
Get Opsgenie api_key for creating alert
"""
conn = self.get_connection(self.http_conn_id)
api_key = conn.password
if not api_key:
raise AirflowException('Opsgenie API Key is required for this hook, '
'please check your conn_id configuration.')
return api_key | python | {
"resource": ""
} |
q266159 | OpsgenieAlertHook.get_conn | test | def get_conn(self, headers=None):
"""
Overwrite HttpHook get_conn because this hook just needs base_url
and headers, and does not need generic params
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
"""
conn = self.get_connection(self.http_conn_id)
self.base_url = conn.host if conn.host else 'https://api.opsgenie.com'
session = requests.Session()
if headers:
session.headers.update(headers)
return session | python | {
"resource": ""
} |
q266160 | OpsgenieAlertHook.execute | test | def execute(self, payload={}):
"""
Execute the Opsgenie Alert call
:param payload: Opsgenie API Create Alert payload values
See https://docs.opsgenie.com/docs/alert-api#section-create-alert
:type payload: dict
"""
api_key = self._get_api_key()
return self.run(endpoint='v2/alerts',
data=json.dumps(payload),
headers={'Content-Type': 'application/json',
'Authorization': 'GenieKey %s' % api_key}) | python | {
"resource": ""
} |
q266161 | OpsgenieAlertOperator._build_opsgenie_payload | test | def _build_opsgenie_payload(self):
"""
Construct the Opsgenie JSON payload. All relevant parameters are combined here
to a valid Opsgenie JSON payload.
:return: Opsgenie payload (dict) to send
"""
payload = {}
for key in [
"message", "alias", "description", "responders",
"visibleTo", "actions", "tags", "details", "entity",
"source", "priority", "user", "note"
]:
val = getattr(self, key)
if val:
payload[key] = val
return payload | python | {
"resource": ""
} |
q266162 | OpsgenieAlertOperator.execute | test | def execute(self, context):
"""
Call the OpsgenieAlertHook to post message
"""
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.execute(self._build_opsgenie_payload()) | python | {
"resource": ""
} |
q266163 | AWSAthenaHook.get_conn | test | def get_conn(self):
"""
check if aws conn exists already or create one and return it
:return: boto3 session
"""
if not self.conn:
self.conn = self.get_client_type('athena')
return self.conn | python | {
"resource": ""
} |
q266164 | AWSAthenaHook.run_query | test | def run_query(self, query, query_context, result_configuration, client_request_token=None):
"""
Run Presto query on athena with provided config and return submitted query_execution_id
:param query: Presto query to run
:type query: str
:param query_context: Context in which the query needs to be run
:type query_context: dict
:param result_configuration: Dict with path to store results in and config related to encryption
:type result_configuration: dict
:param client_request_token: Unique token created by user to avoid multiple executions of same query
:type client_request_token: str
:return: str
"""
response = self.conn.start_query_execution(QueryString=query,
ClientRequestToken=client_request_token,
QueryExecutionContext=query_context,
ResultConfiguration=result_configuration)
query_execution_id = response['QueryExecutionId']
return query_execution_id | python | {
"resource": ""
} |
q266165 | AWSAthenaHook.check_query_status | test | def check_query_status(self, query_execution_id):
"""
Fetch the status of a submitted Athena query. Returns None or one of the valid query states.
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:return: str
"""
response = self.conn.get_query_execution(QueryExecutionId=query_execution_id)
state = None
try:
state = response['QueryExecution']['Status']['State']
except Exception as ex:
self.log.error('Exception while getting query state: %s', ex)
finally:
return state | python | {
"resource": ""
} |
q266166 | AWSAthenaHook.poll_query_status | test | def poll_query_status(self, query_execution_id, max_tries=None):
"""
Poll the status of a submitted Athena query until it reaches a final state.
Returns one of the final states
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:param max_tries: Number of times to poll for query state before function exits
:type max_tries: int
:return: str
"""
try_number = 1
final_query_state = None # Query state when query reaches final state or max_tries reached
while True:
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.info('Trial {try_number}: Invalid query state. Retrying again'.format(
try_number=try_number))
elif query_state in self.INTERMEDIATE_STATES:
self.log.info('Trial {try_number}: Query is still in an intermediate state - {state}'
.format(try_number=try_number, state=query_state))
else:
self.log.info('Trial {try_number}: Query execution completed. Final state is {state}'
.format(try_number=try_number, state=query_state))
final_query_state = query_state
break
if max_tries and try_number >= max_tries: # Break loop if max_tries reached
final_query_state = query_state
break
try_number += 1
sleep(self.sleep_time)
return final_query_state | python | {
"resource": ""
} |
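A sketch of how the two methods above combine: submit a query, then poll until it finishes. The constructor arguments, database name and S3 output location are assumptions; run_query() reads self.conn, so get_conn() is called first.

# Hypothetical example: run an Athena query and wait for a final state.
from uuid import uuid4

from airflow.contrib.hooks.aws_athena_hook import AWSAthenaHook

hook = AWSAthenaHook(aws_conn_id='aws_default', sleep_time=30)  # args assumed
hook.get_conn()  # run_query() uses self.conn, so initialise the client first

query_execution_id = hook.run_query(
    query="SELECT 1",
    query_context={"Database": "my_database"},
    result_configuration={"OutputLocation": "s3://my-athena-results/"},
    client_request_token=str(uuid4()),  # unique token to avoid re-execution
)
# Poll until the query reaches a final state, giving up after 20 tries.
final_state = hook.poll_query_status(query_execution_id, max_tries=20)
print(final_state)  # e.g. 'SUCCEEDED', 'FAILED' or 'CANCELLED'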
q266167 | SFTPHook.get_conn | test | def get_conn(self):
"""
Returns an SFTP connection object
"""
if self.conn is None:
cnopts = pysftp.CnOpts()
if self.no_host_key_check:
cnopts.hostkeys = None
cnopts.compression = self.compress
conn_params = {
'host': self.remote_host,
'port': self.port,
'username': self.username,
'cnopts': cnopts
}
if self.password and self.password.strip():
conn_params['password'] = self.password
if self.key_file:
conn_params['private_key'] = self.key_file
if self.private_key_pass:
conn_params['private_key_pass'] = self.private_key_pass
self.conn = pysftp.Connection(**conn_params)
return self.conn | python | {
"resource": ""
} |
q266168 | ZendeskHook.__handle_rate_limit_exception | test | def __handle_rate_limit_exception(self, rate_limit_exception):
"""
Sleep for the time specified in the exception. If not specified, wait
for 60 seconds.
"""
retry_after = int(
rate_limit_exception.response.headers.get('Retry-After', 60))
self.log.info(
"Hit Zendesk API rate limit. Pausing for %s seconds",
retry_after
)
time.sleep(retry_after) | python | {
"resource": ""
} |
q266169 | ZendeskHook.call | test | def call(self, path, query=None, get_all_pages=True, side_loading=False):
"""
Call Zendesk API and return results
:param path: The Zendesk API to call
:param query: Query parameters
:param get_all_pages: Accumulate results over all pages before
returning. Due to strict rate limiting, this can often time out.
Waits for the recommended period between tries after a timeout.
:param side_loading: Retrieve related records as part of a single
request. In order to enable side-loading, add an 'include'
query parameter containing a comma-separated list of resources
to load. For more information on side-loading see
https://developer.zendesk.com/rest_api/docs/core/side_loading
"""
zendesk = self.get_conn()
first_request_successful = False
while not first_request_successful:
try:
results = zendesk.call(path, query)
first_request_successful = True
except RateLimitError as rle:
self.__handle_rate_limit_exception(rle)
# Find the key with the results
keys = [path.split("/")[-1].split(".json")[0]]
next_page = results['next_page']
if side_loading:
keys += query['include'].split(',')
results = {key: results[key] for key in keys}
if get_all_pages:
while next_page is not None:
try:
# Need to split because the next page URL has
# `github.zendesk...`
# in it, but the call function needs it removed.
next_url = next_page.split(self.__url)[1]
self.log.info("Calling %s", next_url)
more_res = zendesk.call(next_url)
for key in results:
results[key].extend(more_res[key])
if next_page == more_res['next_page']:
# Unfortunately zdesk doesn't always throw ZendeskError
# when we are done getting all the data. Sometimes the
# next just refers to the current set of results.
# Hence, need to deal with this special case
break
else:
next_page = more_res['next_page']
except RateLimitError as rle:
self.__handle_rate_limit_exception(rle)
except ZendeskError as ze:
if b"Use a start_time older than 5 minutes" in ze.msg:
# We have pretty up to date data
break
else:
raise ze
return results | python | {
"resource": ""
} |
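A usage sketch for call() with side-loading enabled. The connection ID and endpoint are assumptions; the result keys are derived from the path and the 'include' parameter as described above.

# Hypothetical example: fetch all tickets with their related users.
from airflow.hooks.zendesk_hook import ZendeskHook

hook = ZendeskHook(zendesk_conn_id='zendesk_default')  # conn id assumed
results = hook.call(
    path='/api/v2/tickets.json',
    query={'include': 'users'},  # comma-separated resources to side-load
    get_all_pages=True,
    side_loading=True,
)
# The result dict is keyed by resource name, here 'tickets' and 'users'.
print(len(results['tickets']), len(results['users']))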
q266170 | AwsGlueCatalogHook.get_partitions | test | def get_partitions(self,
database_name,
table_name,
expression='',
page_size=None,
max_items=None):
"""
Retrieves the partition values for a table.
:param database_name: The name of the catalog database where the partitions reside.
:type database_name: str
:param table_name: The name of the partitions' table.
:type table_name: str
:param expression: An expression filtering the partitions to be returned.
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions
:type expression: str
:param page_size: pagination size
:type page_size: int
:param max_items: maximum items to return
:type max_items: int
:return: set of partition values where each value is a tuple since
a partition may be composed of multiple columns. For example:
``{('2018-01-01','1'), ('2018-01-01','2')}``
"""
config = {
'PageSize': page_size,
'MaxItems': max_items,
}
paginator = self.get_conn().get_paginator('get_partitions')
response = paginator.paginate(
DatabaseName=database_name,
TableName=table_name,
Expression=expression,
PaginationConfig=config
)
partitions = set()
for page in response:
for p in page['Partitions']:
partitions.add(tuple(p['Values']))
return partitions | python | {
"resource": ""
} |
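A sketch of a get_partitions() call. The database, table, filter expression and connection settings are assumptions.

# Hypothetical example: list the partitions of a Glue catalog table.
from airflow.contrib.hooks.aws_glue_catalog_hook import AwsGlueCatalogHook

hook = AwsGlueCatalogHook(aws_conn_id='aws_default', region_name='us-east-1')
partitions = hook.get_partitions(
    database_name='my_database',
    table_name='my_table',
    expression="ds='2018-01-01'",
    page_size=100,
)
# Each element is a tuple of partition values, e.g. ('2018-01-01', '1').
for values in sorted(partitions):
    print(values)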
q266171 | AwsGlueCatalogHook.get_table | test | def get_table(self, database_name, table_name):
"""
Get the information of the table
:param database_name: Name of the hive database (schema) the table belongs to
:type database_name: str
:param table_name: Name of hive table
:type table_name: str
:rtype: dict
>>> hook = AwsGlueCatalogHook()
>>> r = hook.get_table('db', 'table_foo')
>>> r['Name'] == 'table_foo'
"""
result = self.get_conn().get_table(DatabaseName=database_name, Name=table_name)
return result['Table'] | python | {
"resource": ""
} |
q266172 | AwsGlueCatalogHook.get_table_location | test | def get_table_location(self, database_name, table_name):
"""
Get the physical location of the table
:param database_name: Name of the hive database (schema) the table belongs to
:type database_name: str
:param table_name: Name of hive table
:type table_name: str
:return: str
"""
table = self.get_table(database_name, table_name)
return table['StorageDescriptor']['Location'] | python | {
"resource": ""
} |
q266173 | RedshiftHook.cluster_status | test | def cluster_status(self, cluster_identifier):
"""
Return status of a cluster
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
"""
conn = self.get_conn()
try:
response = conn.describe_clusters(
ClusterIdentifier=cluster_identifier)['Clusters']
return response[0]['ClusterStatus'] if response else None
except conn.exceptions.ClusterNotFoundFault:
return 'cluster_not_found' | python | {
"resource": ""
} |
q266174 | RedshiftHook.delete_cluster | test | def delete_cluster(
self,
cluster_identifier,
skip_final_cluster_snapshot=True,
final_cluster_snapshot_identifier=''):
"""
Delete a cluster and optionally create a snapshot
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:type skip_final_cluster_snapshot: bool
:param final_cluster_snapshot_identifier: name of final cluster snapshot
:type final_cluster_snapshot_identifier: str
"""
response = self.get_conn().delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier
)
return response['Cluster'] if response['Cluster'] else None | python | {
"resource": ""
} |
q266175 | RedshiftHook.describe_cluster_snapshots | test | def describe_cluster_snapshots(self, cluster_identifier):
"""
Gets a list of snapshots for a cluster
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
"""
response = self.get_conn().describe_cluster_snapshots(
ClusterIdentifier=cluster_identifier
)
if 'Snapshots' not in response:
return None
snapshots = response['Snapshots']
# use a list (not a filter iterator) so the in-place sort below works
snapshots = [snapshot for snapshot in snapshots if snapshot['Status']]
snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True)
return snapshots | python | {
"resource": ""
} |
q266176 | RedshiftHook.restore_from_cluster_snapshot | test | def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier):
"""
Restores a cluster from its snapshot
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
:param snapshot_identifier: unique identifier for a snapshot of a cluster
:type snapshot_identifier: str
"""
response = self.get_conn().restore_from_cluster_snapshot(
ClusterIdentifier=cluster_identifier,
SnapshotIdentifier=snapshot_identifier
)
return response['Cluster'] if response['Cluster'] else None | python | {
"resource": ""
} |
q266177 | RedshiftHook.create_cluster_snapshot | test | def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier):
"""
Creates a snapshot of a cluster
:param snapshot_identifier: unique identifier for a snapshot of a cluster
:type snapshot_identifier: str
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
"""
response = self.get_conn().create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
)
return response['Snapshot'] if response['Snapshot'] else None | python | {
"resource": ""
} |
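A sketch combining the Redshift methods above: snapshot a running cluster, then restore a new cluster from that snapshot. The connection ID and identifiers are assumptions.

# Hypothetical example: snapshot and restore a Redshift cluster.
from airflow.contrib.hooks.redshift_hook import RedshiftHook

hook = RedshiftHook(aws_conn_id='aws_default')

# Only snapshot the cluster when it is up and available.
if hook.cluster_status('my-cluster') == 'available':
    hook.create_cluster_snapshot(
        snapshot_identifier='my-cluster-2018-01-01',
        cluster_identifier='my-cluster',
    )

# Later, bring up a new cluster from that snapshot.
hook.restore_from_cluster_snapshot(
    cluster_identifier='my-restored-cluster',
    snapshot_identifier='my-cluster-2018-01-01',
)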
q266178 | SlackAPIOperator.execute | test | def execute(self, **kwargs):
"""
SlackAPIOperator calls will not fail even if the call itself is unsuccessful.
An unsuccessful call should not prevent a DAG from completing successfully
"""
if not self.api_params:
self.construct_api_call_params()
slack = SlackHook(token=self.token, slack_conn_id=self.slack_conn_id)
slack.call(self.method, self.api_params) | python | {
"resource": ""
} |
q266179 | EmrHook.create_job_flow | test | def create_job_flow(self, job_flow_overrides):
"""
Creates a job flow using the config from the EMR connection.
Keys of the json extra hash may have the arguments of the boto3
run_job_flow method.
Overrides for this config may be passed as the job_flow_overrides.
"""
if not self.emr_conn_id:
raise AirflowException('emr_conn_id must be present to use create_job_flow')
emr_conn = self.get_connection(self.emr_conn_id)
config = emr_conn.extra_dejson.copy()
config.update(job_flow_overrides)
response = self.get_conn().run_job_flow(**config)
return response | python | {
"resource": ""
} |
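A sketch of create_job_flow(): the overrides dict is merged over the JSON extras stored in the 'emr_default' connection. All values shown are assumptions.

# Hypothetical example: start an EMR job flow with a few overrides.
from airflow.contrib.hooks.emr_hook import EmrHook

hook = EmrHook(aws_conn_id='aws_default', emr_conn_id='emr_default')
response = hook.create_job_flow({
    'Name': 'example-cluster',
    'ReleaseLabel': 'emr-5.11.0',
    'Instances': {'InstanceCount': 3},
})
# The response follows boto3's run_job_flow response shape.
print(response['JobFlowId'])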
q266180 | HdfsSensor.filter_for_filesize | test | def filter_for_filesize(result, size=None):
"""
Test the filepath result and check whether its size is at least self.filesize
:param result: a list of dicts returned by Snakebite ls
:param size: the file size in MB a file should be at least to trigger True
:return: (bool) depending on the matching criteria
"""
if size:
log = LoggingMixin().log
log.debug(
'Filtering for file size >= %s in files: %s',
size, map(lambda x: x['path'], result)
)
size *= settings.MEGABYTE
result = [x for x in result if x['length'] >= size]
log.debug('HdfsSensor.poke: after size filter result is %s', result)
return result | python | {
"resource": ""
} |
q266181 | HdfsSensor.filter_for_ignored_ext | test | def filter_for_ignored_ext(result, ignored_ext, ignore_copying):
"""
Filter the result, if instructed to do so, to remove entries matching the ignored criteria
:param result: list of dicts returned by Snakebite ls
:type result: list[dict]
:param ignored_ext: list of ignored extensions
:type ignored_ext: list
:param ignore_copying: whether to ignore files that are still being copied
:type ignore_copying: bool
:return: list of dicts which were not removed
:rtype: list[dict]
"""
if ignore_copying:
log = LoggingMixin().log
regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext)
ignored_extensions_regex = re.compile(regex_builder)
log.debug(
'Filtering result for ignored extensions: %s in files %s',
ignored_extensions_regex.pattern, map(lambda x: x['path'], result)
)
result = [x for x in result if not ignored_extensions_regex.match(x['path'])]
log.debug('HdfsSensor.poke: after ext filter result is %s', result)
return result | python | {
"resource": ""
} |
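A sketch of the two static filters above applied to a fake Snakebite ls() result. Paths and sizes are made up; the import path assumes Airflow's 1.x layout.

# Hypothetical example: size filter followed by the ignored-extension filter.
from airflow.sensors.hdfs_sensor import HdfsSensor

ls_result = [
    {'path': '/data/part-00000', 'length': 5 * 1024 ** 2},
    {'path': '/data/part-00001._COPYING_', 'length': 10 * 1024 ** 2},
    {'path': '/data/_tmp.part-00002', 'length': 1024},
]

# Keep only files of at least 1 MB ...
result = HdfsSensor.filter_for_filesize(ls_result, size=1)
# ... then drop files that are still being copied.
result = HdfsSensor.filter_for_ignored_ext(
    result, ignored_ext=['_COPYING_'], ignore_copying=True)
print([x['path'] for x in result])  # ['/data/part-00000']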
q266182 | MongoToS3Operator.execute | test | def execute(self, context):
"""
Executed by task_instance at runtime
"""
s3_conn = S3Hook(self.s3_conn_id)
# Grab collection and execute query according to whether or not it is a pipeline
if self.is_pipeline:
results = MongoHook(self.mongo_conn_id).aggregate(
mongo_collection=self.mongo_collection,
aggregate_query=self.mongo_query,
mongo_db=self.mongo_db
)
else:
results = MongoHook(self.mongo_conn_id).find(
mongo_collection=self.mongo_collection,
query=self.mongo_query,
mongo_db=self.mongo_db
)
# Performs transform then stringifies the docs results into json format
docs_str = self._stringify(self.transform(results))
# Load Into S3
s3_conn.load_string(
string_data=docs_str,
key=self.s3_key,
bucket_name=self.s3_bucket,
replace=self.replace
)
return True | python | {
"resource": ""
} |
q266183 | get_pool | test | def get_pool(name, session=None):
"""Get pool by a given name."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
pool = session.query(Pool).filter_by(pool=name).first()
if pool is None:
raise PoolNotFound("Pool '%s' doesn't exist" % name)
return pool | python | {
"resource": ""
} |
q266184 | create_pool | test | def create_pool(name, slots, description, session=None):
"""Create a pool with a given parameters."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
try:
slots = int(slots)
except ValueError:
raise AirflowBadRequest("Bad value for `slots`: %s" % slots)
session.expire_on_commit = False
pool = session.query(Pool).filter_by(pool=name).first()
if pool is None:
pool = Pool(pool=name, slots=slots, description=description)
session.add(pool)
else:
pool.slots = slots
pool.description = description
session.commit()
return pool | python | {
"resource": ""
} |
q266185 | delete_pool | test | def delete_pool(name, session=None):
"""Delete pool by a given name."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
pool = session.query(Pool).filter_by(pool=name).first()
if pool is None:
raise PoolNotFound("Pool '%s' doesn't exist" % name)
session.delete(pool)
session.commit()
return pool | python | {
"resource": ""
} |
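A sketch of the three pool helpers above. The pool name, slot count and description are assumptions; the metadata-database session is expected to be injected by Airflow's provide_session decorator.

# Hypothetical example: create, inspect and delete a pool.
from airflow.api.common.experimental.pool import (
    create_pool, delete_pool, get_pool)

# create_pool() also updates slots/description if the pool already exists.
pool = create_pool(name='etl_pool', slots=4, description='ETL tasks')
print(pool.pool, pool.slots)

# get_pool() raises PoolNotFound for a missing name.
print(get_pool('etl_pool').description)

# delete_pool() returns the deleted record.
delete_pool('etl_pool')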
q266186 | GKEClusterHook._dict_to_proto | test | def _dict_to_proto(py_dict, proto):
"""
Converts a python dictionary to the proto supplied
:param py_dict: The dictionary to convert
:type py_dict: dict
:param proto: The proto object to merge with dictionary
:type proto: protobuf
:return: A parsed python dictionary in provided proto format
:raises:
ParseError: On JSON parsing problems.
"""
dict_json_str = json.dumps(py_dict)
return json_format.Parse(dict_json_str, proto) | python | {
"resource": ""
} |
q266187 | GKEClusterHook.wait_for_operation | test | def wait_for_operation(self, operation, project_id=None):
"""
Given an operation, continuously fetches the status from Google Cloud until either
the operation completes or an error occurs
:param operation: The Operation to wait for
:type operation: google.cloud.container_v1.gapic.enums.Operation
:param project_id: Google Cloud Platform project ID
:type project_id: str
:return: A new, updated operation fetched from Google Cloud
"""
self.log.info("Waiting for OPERATION_NAME %s", operation.name)
time.sleep(OPERATIONAL_POLL_INTERVAL)
while operation.status != Operation.Status.DONE:
if operation.status == Operation.Status.RUNNING or operation.status == \
Operation.Status.PENDING:
time.sleep(OPERATIONAL_POLL_INTERVAL)
else:
raise exceptions.GoogleCloudError(
"Operation has failed with status: %s" % operation.status)
# To update status of operation
operation = self.get_operation(operation.name, project_id=project_id or self.project_id)
return operation | python | {
"resource": ""
} |
q266188 | GKEClusterHook.get_operation | test | def get_operation(self, operation_name, project_id=None):
"""
Fetches the operation from Google Cloud
:param operation_name: Name of operation to fetch
:type operation_name: str
:param project_id: Google Cloud Platform project ID
:type project_id: str
:return: The new, updated operation from Google Cloud
"""
return self.get_client().get_operation(project_id=project_id or self.project_id,
zone=self.location,
operation_id=operation_name) | python | {
"resource": ""
} |
q266189 | GKEClusterHook._append_label | test | def _append_label(cluster_proto, key, val):
"""
Append labels to provided Cluster Protobuf
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param cluster_proto: The proto to append resource_label airflow
version to
:type cluster_proto: google.cloud.container_v1.types.Cluster
:param key: The key label
:type key: str
:param val:
:type val: str
:return: The cluster proto updated with new label
"""
val = val.replace('.', '-').replace('+', '-')
cluster_proto.resource_labels.update({key: val})
return cluster_proto | python | {
"resource": ""
} |
q266190 | GKEClusterHook.create_cluster | test | def create_cluster(self, cluster, project_id=None, retry=DEFAULT, timeout=DEFAULT):
"""
Creates a cluster, consisting of the specified number and type of Google Compute
Engine instances.
:param cluster: A Cluster protobuf or dict. If dict is provided, it must
be of the same form as the protobuf message
:class:`google.cloud.container_v1.types.Cluster`
:type cluster: dict or google.cloud.container_v1.types.Cluster
:param project_id: Google Cloud Platform project ID
:type project_id: str
:param retry: A retry object (``google.api_core.retry.Retry``) used to
retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:return: The full url to the new, or existing, cluster
:raises:
ParseError: On JSON parsing problems when trying to convert dict
AirflowException: cluster is not dict type nor Cluster proto type
"""
if isinstance(cluster, dict):
cluster_proto = Cluster()
cluster = self._dict_to_proto(py_dict=cluster, proto=cluster_proto)
elif not isinstance(cluster, Cluster):
raise AirflowException(
"cluster is not instance of Cluster proto or python dict")
self._append_label(cluster, 'airflow-version', 'v' + version.version)
self.log.info(
"Creating (project_id=%s, zone=%s, cluster_name=%s)",
self.project_id, self.location, cluster.name
)
try:
op = self.get_client().create_cluster(project_id=project_id or self.project_id,
zone=self.location,
cluster=cluster,
retry=retry,
timeout=timeout)
op = self.wait_for_operation(op)
return op.target_link
except AlreadyExists as error:
self.log.info('Assuming Success: %s', error.message)
return self.get_cluster(name=cluster.name).self_link | python | {
"resource": ""
} |
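A sketch of create_cluster() driven by a plain dict, which the hook converts to a Cluster proto via _dict_to_proto. The constructor arguments, project ID and cluster fields are assumptions.

# Hypothetical example: create a small GKE cluster from a dict.
from airflow.contrib.hooks.gcp_container_hook import GKEClusterHook

hook = GKEClusterHook(gcp_conn_id='google_cloud_default',
                      location='europe-west1-b')  # args assumed
cluster_url = hook.create_cluster(
    cluster={'name': 'example-cluster', 'initial_node_count': 1},
    project_id='my-gcp-project',
)
print(cluster_url)  # full URL of the new (or already existing) cluster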
q266191 | GKEClusterHook.get_cluster | test | def get_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT):
"""
Gets details of specified cluster
:param name: The name of the cluster to retrieve
:type name: str
:param project_id: Google Cloud Platform project ID
:type project_id: str
:param retry: A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:return: google.cloud.container_v1.types.Cluster
"""
self.log.info(
"Fetching cluster (project_id=%s, zone=%s, cluster_name=%s)",
project_id or self.project_id, self.location, name
)
return self.get_client().get_cluster(project_id=project_id or self.project_id,
zone=self.location,
cluster_id=name,
retry=retry,
timeout=timeout).self_link | python | {
"resource": ""
} |
q266192 | DiscordWebhookHook._get_webhook_endpoint | test | def _get_webhook_endpoint(self, http_conn_id, webhook_endpoint):
"""
Given a Discord http_conn_id, return the default webhook endpoint or override if a
webhook_endpoint is manually supplied.
:param http_conn_id: The provided connection ID
:param webhook_endpoint: The manually provided webhook endpoint
:return: Webhook endpoint (str) to use
"""
if webhook_endpoint:
endpoint = webhook_endpoint
elif http_conn_id:
conn = self.get_connection(http_conn_id)
extra = conn.extra_dejson
endpoint = extra.get('webhook_endpoint', '')
else:
raise AirflowException('Cannot get webhook endpoint: No valid Discord '
'webhook endpoint or http_conn_id supplied.')
# make sure endpoint matches the expected Discord webhook format
if not re.match('^webhooks/[0-9]+/[a-zA-Z0-9_-]+$', endpoint):
raise AirflowException('Expected Discord webhook endpoint in the form '
'of "webhooks/{webhook.id}/{webhook.token}".')
return endpoint | python | {
"resource": ""
} |
q266193 | DiscordWebhookHook._build_discord_payload | test | def _build_discord_payload(self):
"""
Construct the Discord JSON payload. All relevant parameters are combined here
into a valid Discord JSON payload.
:return: Discord payload (str) to send
"""
payload = {}
if self.username:
payload['username'] = self.username
if self.avatar_url:
payload['avatar_url'] = self.avatar_url
payload['tts'] = self.tts
if len(self.message) <= 2000:
payload['content'] = self.message
else:
raise AirflowException('Discord message length must be 2000 or fewer '
'characters.')
return json.dumps(payload) | python | {
"resource": ""
} |
q266194 | DiscordWebhookHook.execute | test | def execute(self):
"""
Execute the Discord webhook call
"""
proxies = {}
if self.proxy:
# we only need https proxy for Discord
proxies = {'https': self.proxy}
discord_payload = self._build_discord_payload()
self.run(endpoint=self.webhook_endpoint,
data=discord_payload,
headers={'Content-type': 'application/json'},
extra_options={'proxies': proxies}) | python | {
"resource": ""
} |
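A sketch of the Discord webhook hook in use. Only the attributes referenced by the methods above are known from the source; the constructor arguments are assumptions.

# Hypothetical example: post a message via a Discord webhook.
from airflow.contrib.hooks.discord_webhook_hook import DiscordWebhookHook

hook = DiscordWebhookHook(
    http_conn_id='discord_default',
    webhook_endpoint='webhooks/00000000/aaaabbbbcccc',  # webhooks/{id}/{token}
    message='Deployment finished',
    username='airflow-bot',
    tts=False,
)
hook.execute()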
q266195 | GoogleCloudKMSHook.encrypt | test | def encrypt(self, key_name, plaintext, authenticated_data=None):
"""
Encrypts a plaintext message using Google Cloud KMS.
:param key_name: The Resource Name for the key (or key version)
to be used for encryption. Of the form
``projects/*/locations/*/keyRings/*/cryptoKeys/**``
:type key_name: str
:param plaintext: The message to be encrypted.
:type plaintext: bytes
:param authenticated_data: Optional additional authenticated data that
must also be provided to decrypt the message.
:type authenticated_data: bytes
:return: The base 64 encoded ciphertext of the original message.
:rtype: str
"""
keys = self.get_conn().projects().locations().keyRings().cryptoKeys()
body = {'plaintext': _b64encode(plaintext)}
if authenticated_data:
body['additionalAuthenticatedData'] = _b64encode(authenticated_data)
request = keys.encrypt(name=key_name, body=body)
response = request.execute(num_retries=self.num_retries)
ciphertext = response['ciphertext']
return ciphertext | python | {
"resource": ""
} |
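A sketch of encrypt(). The key resource name and connection ID are assumptions; the optional authenticated data must be presented again at decryption time, as noted in the docstring.

# Hypothetical example: encrypt a small secret with Cloud KMS.
from airflow.contrib.hooks.gcp_kms_hook import GoogleCloudKMSHook

hook = GoogleCloudKMSHook(gcp_conn_id='google_cloud_default')
key_name = ('projects/my-project/locations/global/'
            'keyRings/my-ring/cryptoKeys/my-key')
ciphertext = hook.encrypt(
    key_name=key_name,
    plaintext=b'super-secret-value',
    authenticated_data=b'my-context',
)
print(ciphertext)  # base64-encoded ciphertext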
q266196 | SqoopHook.import_table | test | def import_table(self, table, target_dir=None, append=False, file_type="text",
columns=None, split_by=None, where=None, direct=False,
driver=None, extra_import_options=None):
"""
Imports table from remote location to target dir. Arguments are
copies of direct sqoop command line arguments
:param table: Table to read
:param target_dir: HDFS destination dir
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" or "parquet".
Imports data into the specified format. Defaults to text.
:param columns: <col,col,col…> Columns to import from table
:param split_by: Column of the table used to split work units
:param where: WHERE clause to use during import
:param direct: Use direct connector if exists for the database
:param driver: Manually specify JDBC driver class to use
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct,
driver, extra_import_options)
cmd += ["--table", table]
if columns:
cmd += ["--columns", columns]
if where:
cmd += ["--where", where]
self.Popen(cmd) | python | {
"resource": ""
} |
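A sketch of import_table(). The connection ID, table, target directory and extra options are assumptions.

# Hypothetical example: import a table into HDFS as Parquet.
from airflow.contrib.hooks.sqoop_hook import SqoopHook

hook = SqoopHook(conn_id='sqoop_default')  # conn id assumed
hook.import_table(
    table='sales',
    target_dir='/user/airflow/sales',
    file_type='parquet',
    columns='id,amount,created_at',
    split_by='id',
    where="created_at >= '2018-01-01'",
    extra_import_options={'num-mappers': '4'},  # passed without the -- prefix
)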
q266197 | SqoopHook.import_query | test | def import_query(self, query, target_dir, append=False, file_type="text",
split_by=None, direct=None, driver=None, extra_import_options=None):
"""
Imports a specific query from the rdbms to hdfs
:param query: Free format query to run
:param target_dir: HDFS destination dir
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" or "parquet"
Imports data into HDFS in the specified format. Defaults to text.
:param split_by: Column of the table used to split work units
:param direct: Use direct import fast path
:param driver: Manually specify JDBC driver class to use
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct,
driver, extra_import_options)
cmd += ["--query", query]
self.Popen(cmd) | python | {
"resource": ""
} |
q266198 | SqoopHook.export_table | test | def export_table(self, table, export_dir, input_null_string,
input_null_non_string, staging_table,
clear_staging_table, enclosed_by,
escaped_by, input_fields_terminated_by,
input_lines_terminated_by,
input_optionally_enclosed_by, batch,
relaxed_isolation, extra_export_options=None):
"""
Exports Hive table to remote location. Arguments are copies of direct
sqoop command line Arguments
:param table: Table remote destination
:param export_dir: Hive table to export
:param input_null_string: The string to be interpreted as null for
string columns
:param input_null_non_string: The string to be interpreted as null
for non-string columns
:param staging_table: The table in which data will be staged before
being inserted into the destination table
:param clear_staging_table: Indicate that any data present in the
staging table can be deleted
:param enclosed_by: Sets a required field enclosing character
:param escaped_by: Sets the escape character
:param input_fields_terminated_by: Sets the field separator character
:param input_lines_terminated_by: Sets the end-of-line character
:param input_optionally_enclosed_by: Sets a field enclosing character
:param batch: Use batch mode for underlying statement execution
:param relaxed_isolation: Transaction isolation to read uncommitted
for the mappers
:param extra_export_options: Extra export options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._export_cmd(table, export_dir, input_null_string,
input_null_non_string, staging_table,
clear_staging_table, enclosed_by, escaped_by,
input_fields_terminated_by,
input_lines_terminated_by,
input_optionally_enclosed_by, batch,
relaxed_isolation, extra_export_options)
self.Popen(cmd) | python | {
"resource": ""
} |
q266199 | GCPTextToSpeechHook.get_conn | test | def get_conn(self):
"""
Retrieves connection to Cloud Text to Speech.
:return: Google Cloud Text to Speech client object.
:rtype: google.cloud.texttospeech_v1.TextToSpeechClient
"""
if not self._client:
self._client = TextToSpeechClient(credentials=self._get_credentials())
return self._client | python | {
"resource": ""
} |
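A small sketch showing the caching behaviour of get_conn() above; the connection ID is an assumption.

# Hypothetical example: the Text-to-Speech client is created once and reused.
from airflow.contrib.hooks.gcp_text_to_speech_hook import GCPTextToSpeechHook

hook = GCPTextToSpeechHook(gcp_conn_id='google_cloud_default')
client = hook.get_conn()
assert hook.get_conn() is client  # subsequent calls return the cached client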