_id: stringlengths (2 to 7)
title: stringlengths (1 to 88)
partition: stringclasses (3 values)
text: stringlengths (31 to 13.1k)
language: stringclasses (1 value)
meta_information: dict
q266100
BigtableHook.update_cluster
test
def update_cluster(instance, cluster_id, nodes): """ Updates number of nodes in the specified Cloud Bigtable cluster. Raises google.api_core.exceptions.NotFound if the cluster does not exist. :type instance: Instance :param instance: The Cloud Bigtable instance that owns the cluster. :type cluster_id: str
python
{ "resource": "" }
q266101
HiveCliHook._prepare_cli_cmd
test
def _prepare_cli_cmd(self): """ This function creates the command list from available information """ conn = self.conn hive_bin = 'hive' cmd_extra = [] if self.use_beeline: hive_bin = 'beeline' jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format( host=conn.host, port=conn.port, schema=conn.schema) if configuration.conf.get('core', 'security') == 'kerberos': template = conn.extra_dejson.get( 'principal', "hive/_HOST@EXAMPLE.COM") if "_HOST" in template: template = utils.replace_hostname_pattern( utils.get_components(template)) proxy_user = "" # noqa if conn.extra_dejson.get('proxy_user') == "login" and conn.login: proxy_user = "hive.server2.proxy.user={0}".format(conn.login) elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as: proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
python
{ "resource": "" }
q266102
HiveCliHook._prepare_hiveconf
test
def _prepare_hiveconf(d): """ This function prepares a list of hiveconf params from a dictionary of key/value pairs. :param d: dictionary of key/value hiveconf pairs :type d: dict >>> hh = HiveCliHook() >>> hive_conf = {"hive.exec.dynamic.partition": "true",
python
{ "resource": "" }
q266103
HiveCliHook.load_df
test
def load_df( self, df, table, field_dict=None, delimiter=',', encoding='utf8', pandas_kwargs=None, **kwargs): """ Loads a pandas DataFrame into hive. Hive data types will be inferred if not passed but column names will not be sanitized. :param df: DataFrame to load into a Hive table :type df: pandas.DataFrame :param table: target Hive table, use dot notation to target a specific database :type table: str :param field_dict: mapping from column name to hive data type. Note that it must be OrderedDict so as to keep columns' order. :type field_dict: collections.OrderedDict :param delimiter: field delimiter in the file :type delimiter: str :param encoding: str encoding to use when writing DataFrame to file :type encoding: str :param pandas_kwargs: passed to DataFrame.to_csv :type pandas_kwargs: dict :param kwargs: passed to self.load_file """ def _infer_field_types_from_df(df): DTYPE_KIND_HIVE_TYPE = { 'b': 'BOOLEAN', # boolean 'i': 'BIGINT', # signed integer 'u': 'BIGINT', # unsigned integer 'f': 'DOUBLE', # floating-point 'c': 'STRING', # complex floating-point 'M': 'TIMESTAMP', # datetime 'O': 'STRING', # object 'S': 'STRING', # (byte-)string 'U': 'STRING', # Unicode 'V': 'STRING' # void } d = OrderedDict()
python
{ "resource": "" }
q266104
HiveCliHook.load_file
test
def load_file( self, filepath, table, delimiter=",", field_dict=None, create=True, overwrite=True, partition=None, recreate=False, tblproperties=None): """ Loads a local file into Hive Note that the table generated in Hive uses ``STORED AS textfile`` which isn't the most efficient serialization format. If a large amount of data is loaded and/or if the tables gets queried considerably, you may want to use this operator only to stage the data into a temporary table before loading it into its final destination using a ``HiveOperator``. :param filepath: local filepath of the file to load :type filepath: str :param table: target Hive table, use dot notation to target a specific database :type table: str :param delimiter: field delimiter in the file :type delimiter: str :param field_dict: A dictionary of the fields name in the file as keys and their Hive types as values. Note that it must be OrderedDict so as to keep columns' order. :type field_dict: collections.OrderedDict :param create: whether to create the table if it doesn't exist :type create: bool :param overwrite: whether to overwrite the data in table or partition :type overwrite: bool :param partition: target partition as a dict of partition columns and values :type partition: dict :param recreate: whether to drop and recreate the table at every execution :type recreate: bool :param tblproperties: TBLPROPERTIES of the hive table being created :type tblproperties: dict """ hql = '' if recreate: hql += "DROP TABLE IF EXISTS {table};\n".format(table=table) if create or recreate: if field_dict is None: raise ValueError("Must provide a field dict when creating a table") fields = ",\n ".join( [k + ' ' + v for k, v in field_dict.items()]) hql += "CREATE TABLE
python
{ "resource": "" }
q266105
HiveMetastoreHook.get_metastore_client
test
def get_metastore_client(self): """ Returns a Hive thrift client. """ import hmsclient from thrift.transport import TSocket, TTransport from thrift.protocol import TBinaryProtocol ms = self.metastore_conn auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL') if configuration.conf.get('core', 'security') == 'kerberos': auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI') kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive') socket = TSocket.TSocket(ms.host, ms.port) if configuration.conf.get('core', 'security') == 'kerberos' \ and auth_mechanism == 'GSSAPI': try: import saslwrapper as sasl except ImportError: import sasl def sasl_factory(): sasl_client = sasl.Client()
python
{ "resource": "" }
q266106
HiveMetastoreHook.check_for_named_partition
test
def check_for_named_partition(self, schema, table, partition_name): """ Checks whether a partition with a given name exists :param schema: Name of hive schema (database) @table belongs to :type schema: str :param table: Name of hive table @partition belongs to :type table: str
python
{ "resource": "" }
q266107
HiveMetastoreHook.table_exists
test
def table_exists(self, table_name, db='default'): """ Check if table exists >>> hh = HiveMetastoreHook() >>> hh.table_exists(db='airflow', table_name='static_babynames') True >>> hh.table_exists(db='airflow', table_name='does_not_exist') False
python
{ "resource": "" }
q266108
HiveServer2Hook.get_conn
test
def get_conn(self, schema=None): """ Returns a Hive connection object. """ db = self.get_connection(self.hiveserver2_conn_id) auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE') if auth_mechanism == 'NONE' and db.login is None: # we need to give a username username = 'airflow' kerberos_service_name = None if configuration.conf.get('core', 'security') == 'kerberos': auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS') kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive') # pyhive uses GSSAPI instead of KERBEROS as a auth_mechanism identifier if auth_mechanism == 'GSSAPI': self.log.warning(
python
{ "resource": "" }
q266109
HiveServer2Hook.get_results
test
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None): """ Get results of the provided hql in target schema. :param hql: hql to be executed. :type hql: str or list :param schema: target schema, default to 'default'. :type schema: str :param fetch_size: max size of result to fetch. :type fetch_size: int :param hive_conf: hive_conf to execute along with the hql. :type hive_conf: dict :return: results of hql execution, dict with data (list of results) and header :rtype: dict
python
{ "resource": "" }
q266110
HiveServer2Hook.to_csv
test
def to_csv( self, hql, csv_filepath, schema='default', delimiter=',', lineterminator='\r\n', output_header=True, fetch_size=1000, hive_conf=None): """ Execute hql in target schema and write results to a csv file. :param hql: hql to be executed. :type hql: str or list :param csv_filepath: filepath of csv to write results into. :type csv_filepath: str :param schema: target schema, default to 'default'. :type schema: str :param delimiter: delimiter of the csv file, default to ','. :type delimiter: str :param lineterminator: lineterminator of the csv file. :type lineterminator: str :param output_header: header of the csv file, default to True. :type output_header: bool :param fetch_size: number of result rows to write into the csv file, default to 1000.
python
{ "resource": "" }
q266111
HiveServer2Hook.get_records
test
def get_records(self, hql, schema='default', hive_conf=None): """ Get a set of records from a Hive query. :param hql: hql to be executed. :type hql: str or list :param schema: target schema, default to 'default'. :type schema: str
python
{ "resource": "" }
q266112
HiveServer2Hook.get_pandas_df
test
def get_pandas_df(self, hql, schema='default'): """ Get a pandas dataframe from a Hive query :param hql: hql to be executed. :type hql: str or list :param schema: target schema, default to 'default'. :type schema: str :return: result of hql execution :rtype: DataFrame >>> hh = HiveServer2Hook() >>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100" >>> df = hh.get_pandas_df(sql) >>> len(df.index) 100
python
{ "resource": "" }
q266113
CloudVisionHook.get_conn
test
def get_conn(self): """ Retrieves connection to Cloud Vision. :return: Google Cloud Vision client object.
python
{ "resource": "" }
q266114
DingdingHook._get_endpoint
test
def _get_endpoint(self): """ Get Dingding endpoint for sending message. """ conn = self.get_connection(self.http_conn_id) token = conn.password if not token: raise AirflowException('Dingding token is required but none was provided, '
python
{ "resource": "" }
q266115
DingdingHook.send
test
def send(self): """ Send Dingding message """ support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard'] if self.message_type not in support_type: raise ValueError('DingdingWebhookHook only supports {} ' 'so far, but received {}'.format(support_type, self.message_type)) data = self._build_message() self.log.info('Sending Dingding type %s message %s', self.message_type, data) resp = self.run(endpoint=self._get_endpoint(), data=data,
python
{ "resource": "" }
q266116
_bind_parameters
test
def _bind_parameters(operation, parameters): """ Helper method that binds parameters to a SQL query. """ # inspired by MySQL Python Connector (conversion.py) string_parameters = {} for (name, value) in iteritems(parameters): if value is None: string_parameters[name] = 'NULL'
python
{ "resource": "" }
q266117
_escape
test
def _escape(s): """ Helper method that escapes parameters to a SQL query. """ e = s e = e.replace('\\', '\\\\')
python
{ "resource": "" }
q266118
_bq_cast
test
def _bq_cast(string_field, bq_type): """ Helper method that casts a BigQuery row to the appropriate data types. This is useful because BigQuery returns all fields as strings. """ if string_field is None: return None elif bq_type == 'INTEGER': return int(string_field) elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP': return float(string_field)
python
{ "resource": "" }
q266119
_validate_value
test
def _validate_value(key, value, expected_type): """ function to check expected type and raise error if type is not correct """ if not isinstance(value, expected_type):
python
{ "resource": "" }
q266120
BigQueryHook.get_conn
test
def get_conn(self): """ Returns a BigQuery PEP 249 connection object. """ service = self.get_service() project = self._get_field('project') return BigQueryConnection( service=service, project_id=project,
python
{ "resource": "" }
q266121
BigQueryHook.get_service
test
def get_service(self): """ Returns a BigQuery service object. """ http_authorized = self._authorize() return build(
python
{ "resource": "" }
q266122
BigQueryHook.table_exists
test
def table_exists(self, project_id, dataset_id, table_id): """ Checks for the existence of a table in Google BigQuery. :param project_id: The Google cloud project in which to look for the table. The connection supplied to the hook must provide access to the specified project. :type project_id: str :param dataset_id: The name of the dataset in which to look for the table. :type dataset_id: str :param table_id: The name of the table to check the existence of. :type table_id: str
python
{ "resource": "" }
q266123
BigQueryBaseCursor.create_empty_table
test
def create_empty_table(self, project_id, dataset_id, table_id, schema_fields=None, time_partitioning=None, cluster_fields=None, labels=None, view=None, num_retries=None): """ Creates a new, empty table in the dataset. To create a view, which is defined by a SQL query, parse a dictionary to 'view' kwarg :param project_id: The project to create the table into. :type project_id: str :param dataset_id: The dataset to create the table into. :type dataset_id: str :param table_id: The Name of the table to be created. :type table_id: str :param schema_fields: If set, the schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema :type schema_fields: list :param labels: a dictionary containing labels for the table, passed to BigQuery :type labels: dict **Example**: :: schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"}, {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}] :param time_partitioning: configure optional time partitioning fields i.e. partition by field, type and expiration as per API specifications. .. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning :type time_partitioning: dict :param cluster_fields: [Optional] The fields used for clustering. Must be specified with time_partitioning, data in the table will be first partitioned and subsequently clustered. https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields :type cluster_fields: list :param view: [Optional] A dictionary containing definition for the view. If set, it will create a view instead of a table: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view :type view: dict **Example**: :: view = { "query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000", "useLegacySql": False } :return: None """
python
{ "resource": "" }
q266124
BigQueryBaseCursor.patch_table
test
def patch_table(self, dataset_id, table_id, project_id=None, description=None, expiration_time=None, external_data_configuration=None, friendly_name=None, labels=None, schema=None, time_partitioning=None, view=None, require_partition_filter=None): """ Patch information in an existing table. It only updates fileds that are provided in the request object. Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch :param dataset_id: The dataset containing the table to be patched. :type dataset_id: str :param table_id: The Name of the table to be patched. :type table_id: str :param project_id: The project containing the table to be patched. :type project_id: str :param description: [Optional] A user-friendly description of this table. :type description: str :param expiration_time: [Optional] The time when this table expires, in milliseconds since the epoch. :type expiration_time: int :param external_data_configuration: [Optional] A dictionary containing properties of a table stored outside of BigQuery. :type external_data_configuration: dict :param friendly_name: [Optional] A descriptive name for this table. :type friendly_name: str :param labels: [Optional] A dictionary containing labels associated with this table. :type labels: dict :param schema: [Optional] If set, the schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema The supported schema modifications and unsupported schema modification are listed here: https://cloud.google.com/bigquery/docs/managing-table-schemas **Example**: :: schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"}, {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}] :type schema: list :param time_partitioning: [Optional] A dictionary containing time-based partitioning definition for the table. :type time_partitioning: dict :param view: [Optional] A dictionary containing definition for the view. If set, it will patch a view instead of a table: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view **Example**: :: view = { "query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500", "useLegacySql": False
python
{ "resource": "" }
q266125
BigQueryBaseCursor.cancel_query
test
def cancel_query(self): """ Cancel all started queries that have not yet completed """ jobs = self.service.jobs() if (self.running_job_id and not self.poll_job_complete(self.running_job_id)): self.log.info('Attempting to cancel job : %s, %s', self.project_id, self.running_job_id) if self.location: jobs.cancel( projectId=self.project_id, jobId=self.running_job_id, location=self.location).execute(num_retries=self.num_retries) else: jobs.cancel( projectId=self.project_id, jobId=self.running_job_id).execute(num_retries=self.num_retries) else: self.log.info('No running BigQuery jobs to cancel.') return # Wait for all the calls to cancel to finish max_polling_attempts = 12 polling_attempts = 0 job_complete = False while polling_attempts < max_polling_attempts and not job_complete: polling_attempts = polling_attempts + 1 job_complete = self.poll_job_complete(self.running_job_id)
python
{ "resource": "" }
q266126
BigQueryBaseCursor.run_table_delete
test
def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False): """ Delete an existing table from the dataset; If the table does not exist, return an error unless ignore_if_missing is set to True. :param deletion_dataset_table: A dotted ``(<project>.|<project>:)<dataset>.<table>`` that indicates which table will be deleted. :type deletion_dataset_table: str :param ignore_if_missing: if True, then return success even if the requested table does not exist. :type ignore_if_missing: bool :return: """ deletion_project, deletion_dataset, deletion_table = \ _split_tablename(table_input=deletion_dataset_table, default_project_id=self.project_id) try: self.service.tables() \ .delete(projectId=deletion_project,
python
{ "resource": "" }
q266127
BigQueryBaseCursor.run_table_upsert
test
def run_table_upsert(self, dataset_id, table_resource, project_id=None): """ creates a new, empty table in the dataset; If the table already exists, update the existing table. Since BigQuery does not natively allow table upserts, this is not an atomic operation. :param dataset_id: the dataset to upsert the table into. :type dataset_id: str :param table_resource: a table resource. see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource :type table_resource: dict :param project_id: the project to upsert the table into. If None, project will be self.project_id. :return: """ # check to see if the table exists table_id = table_resource['tableReference']['tableId'] project_id = project_id if project_id is not None else self.project_id tables_list_resp = self.service.tables().list( projectId=project_id, datasetId=dataset_id).execute(num_retries=self.num_retries) while True: for table in tables_list_resp.get('tables', []): if table['tableReference']['tableId'] == table_id: # found the table, do update self.log.info('Table %s:%s.%s exists, updating.', project_id, dataset_id, table_id) return self.service.tables().update( projectId=project_id, datasetId=dataset_id, tableId=table_id, body=table_resource).execute(num_retries=self.num_retries) # If there is a next page, we need to check the next page.
python
{ "resource": "" }
q266128
BigQueryBaseCursor.run_grant_dataset_view_access
test
def run_grant_dataset_view_access(self, source_dataset, view_dataset, view_table, source_project=None, view_project=None): """ Grant authorized view access of a dataset to a view table. If this view has already been granted access to the dataset, do nothing. This method is not atomic. Running it may clobber a simultaneous update. :param source_dataset: the source dataset :type source_dataset: str :param view_dataset: the dataset that the view is in :type view_dataset: str :param view_table: the table of the view :type view_table: str :param source_project: the project of the source dataset. If None, self.project_id will be used. :type source_project: str :param view_project: the project that the view is in. If None, self.project_id will be used. :type view_project: str :return: the datasets resource of the source dataset. """
python
{ "resource": "" }
q266129
BigQueryBaseCursor.get_dataset
test
def get_dataset(self, dataset_id, project_id=None): """ Method returns the dataset_resource if the dataset exists and raises a 404 error if it does not exist :param dataset_id: The BigQuery Dataset ID :type dataset_id: str :param project_id: The GCP Project ID :type project_id: str :return: dataset_resource .. seealso:: For more information, see Dataset Resource content: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource """ if not dataset_id or not isinstance(dataset_id, str): raise ValueError("dataset_id argument must be provided and has "
python
{ "resource": "" }
q266130
BigQueryBaseCursor.get_datasets_list
test
def get_datasets_list(self, project_id=None): """ Method returns full list of BigQuery datasets in the current project .. seealso:: For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list :param project_id: Google Cloud Project for which you try to get all datasets :type project_id: str :return: datasets_list Example of returned datasets_list: :: { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_2_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_2_test" } }, { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_1_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_1_test" }
python
{ "resource": "" }
q266131
BigQueryBaseCursor.insert_all
test
def insert_all(self, project_id, dataset_id, table_id, rows, ignore_unknown_values=False, skip_invalid_rows=False, fail_on_error=False): """ Method to stream data into BigQuery one record at a time without needing to run a load job .. seealso:: For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll :param project_id: The name of the project where we have the table :type project_id: str :param dataset_id: The name of the dataset where we have the table :type dataset_id: str :param table_id: The name of the table :type table_id: str :param rows: the rows to insert :type rows: list **Example or rows**: rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}] :param ignore_unknown_values: [Optional] Accept rows that contain values that do not match the schema. The unknown values are ignored. The default value is false, which treats unknown values as errors. :type ignore_unknown_values: bool :param skip_invalid_rows: [Optional] Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist. :type skip_invalid_rows: bool :param fail_on_error: [Optional] Force the task to fail if any errors occur. The default value is false, which indicates the task should not fail even if any insertion errors occur. :type fail_on_error: bool """ dataset_project_id = project_id if project_id else self.project_id body = { "rows": rows, "ignoreUnknownValues": ignore_unknown_values, "kind": "bigquery#tableDataInsertAllRequest", "skipInvalidRows": skip_invalid_rows, } try: self.log.info( 'Inserting %s row(s) into Table %s:%s.%s', len(rows), dataset_project_id, dataset_id, table_id
python
{ "resource": "" }
q266132
BigQueryCursor.execute
test
def execute(self, operation, parameters=None): """ Executes a BigQuery query, and returns the job ID. :param operation: The query to execute. :type operation: str :param parameters: Parameters to substitute into the query. :type parameters: dict
python
{ "resource": "" }
q266133
BigQueryCursor.executemany
test
def executemany(self, operation, seq_of_parameters): """ Execute a BigQuery query multiple times with different parameters. :param operation: The query to execute. :type operation: str :param seq_of_parameters: List of dictionary parameters to substitute into the
python
{ "resource": "" }
q266134
BigQueryCursor.next
test
def next(self): """ Helper method for fetchone, which returns the next row from a buffer. If the buffer is empty, attempts to paginate through the result set for the next page, and load it into the buffer. """ if not self.job_id: return None if len(self.buffer) == 0: if self.all_pages_loaded: return None query_results = (self.service.jobs().getQueryResults( projectId=self.project_id, jobId=self.job_id, pageToken=self.page_token).execute(num_retries=self.num_retries)) if 'rows' in query_results and query_results['rows']: self.page_token = query_results.get('pageToken') fields = query_results['schema']['fields'] col_types = [field['type'] for field in fields] rows = query_results['rows'] for dict_row in rows: typed_row = ([
python
{ "resource": "" }
q266135
PostgresToGoogleCloudStorageOperator._query_postgres
test
def _query_postgres(self): """ Queries Postgres and returns a cursor to the results. """ postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id) conn = postgres.get_conn()
python
{ "resource": "" }
q266136
_make_intermediate_dirs
test
def _make_intermediate_dirs(sftp_client, remote_directory): """ Create all the intermediate directories in a remote host :param sftp_client: A Paramiko SFTP client. :param remote_directory: Absolute Path of the directory containing the file :return: """
python
{ "resource": "" }
q266137
SQSHook.create_queue
test
def create_queue(self, queue_name, attributes=None): """ Create queue using connection object :param queue_name: name of the queue. :type queue_name: str :param attributes: additional attributes for the queue (default: None) For details of the attributes parameter see :py:meth:`botocore.client.SQS.create_queue` :type attributes: dict :return: dict
python
{ "resource": "" }
q266138
SQSHook.send_message
test
def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None): """ Send message to the queue :param queue_url: queue url :type queue_url: str :param message_body: the contents of the message :type message_body: str :param delay_seconds: seconds to delay the message :type delay_seconds: int :param message_attributes: additional attributes for the message (default: None) For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message` :type message_attributes: dict :return: dict with the information about the message sent For details of the returned value see
python
{ "resource": "" }
q266139
BaseTaskRunner.run_command
test
def run_command(self, run_with=None, join_args=False): """ Run the task command. :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']`` :type run_with: list :param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs ``['airflow run']`` :type join_args: bool :return: the process that was run :rtype: subprocess.Popen """ run_with = run_with or [] cmd = [" ".join(self._command)] if join_args else self._command full_cmd = run_with + cmd self.log.info('Running: %s', full_cmd) proc = subprocess.Popen( full_cmd,
python
{ "resource": "" }
q266140
BaseTaskRunner.on_finish
test
def on_finish(self): """ A callback that should be called when this is done running. """ if self._cfg_path and os.path.isfile(self._cfg_path):
python
{ "resource": "" }
q266141
_main
test
def _main(): """ Parse options and process commands """ # Parse arguments usage = "usage: nvd3.py [options]" parser = OptionParser(usage=usage, version=("python-nvd3 - Charts generator with
python
{ "resource": "" }
q266142
NVD3Chart.buildhtmlheader
test
def buildhtmlheader(self): """generate HTML header content""" self.htmlheader = '' # If the JavaScript assets have already been injected, don't bother re-sourcing them. global _js_initialized if '_js_initialized' not in globals() or not _js_initialized:
python
{ "resource": "" }
q266143
NVD3Chart.buildcontainer
test
def buildcontainer(self): """generate HTML div""" if self.container: return # Create SVG div with style if self.width: if self.width[-1] != '%': self.style += 'width:%spx;' % self.width else: self.style += 'width:%s;' % self.width if self.height: if self.height[-1] != '%': self.style += 'height:%spx;' % self.height else:
python
{ "resource": "" }
q266144
NVD3Chart.buildjschart
test
def buildjschart(self): """generate javascript code for the chart""" self.jschart = '' # add custom tooltip string in jschart # default condition (if build_custom_tooltip is
python
{ "resource": "" }
q266145
NVD3Chart.create_x_axis
test
def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False): """Create X-axis""" axis = {} if custom_format and format: axis['tickFormat'] = format elif format: if format == 'AM_PM': axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }" else: axis['tickFormat'] = "d3.format(',%s')" % format if label: axis['axisLabel'] = "'" + label +
python
{ "resource": "" }
q266146
NVD3Chart.create_y_axis
test
def create_y_axis(self, name, label=None, format=None, custom_format=False): """ Create Y-axis """ axis = {} if custom_format and format: axis['tickFormat'] = format elif format:
python
{ "resource": "" }
q266147
SqliteHook.get_conn
test
def get_conn(self): """ Returns a sqlite connection object """
python
{ "resource": "" }
q266148
action_logging
test
def action_logging(f): """ Decorator to log user actions """ @functools.wraps(f) def wrapper(*args, **kwargs): with create_session() as session: if g.user.is_anonymous: user = 'anonymous' else: user = g.user.username log = Log( event=f.__name__, task_instance=None, owner=user, extra=str(list(request.args.items())),
python
{ "resource": "" }
q266149
gzipped
test
def gzipped(f): """ Decorator to make a view compressed """ @functools.wraps(f) def view_func(*args, **kwargs): @after_this_request def zipper(response): accept_encoding = request.headers.get('Accept-Encoding', '') if 'gzip' not in accept_encoding.lower(): return response response.direct_passthrough = False if (response.status_code < 200 or response.status_code >= 300 or
python
{ "resource": "" }
q266150
get_last_dagrun
test
def get_last_dagrun(dag_id, session, include_externally_triggered=False): """ Returns the last dag run for a dag, None if there was none. Last dag run can be any type of run e.g. scheduled or backfilled.
python
{ "resource": "" }
q266151
DagModel.create_dagrun
test
def create_dagrun(self, run_id, state, execution_date, start_date=None, external_trigger=False, conf=None, session=None): """ Creates a dag run from this dag including the tasks associated with this dag. Returns the dag run. :param run_id: defines the run id for this dag run :type run_id: str :param execution_date: the execution date of this dag run :type execution_date: datetime.datetime :param state: the state of the dag run :type state: airflow.utils.state.State :param start_date: the date this dag run should be evaluated :type start_date: datetime.datetime :param external_trigger: whether this dag run is externally triggered
python
{ "resource": "" }
q266152
SQSPublishOperator.execute
test
def execute(self, context): """ Publish the message to SQS queue :param context: the context object :type context: dict :return: dict with information about the message sent For details of the returned dict see :py:meth:`botocore.client.SQS.send_message` :rtype: dict """ hook = SQSHook(aws_conn_id=self.aws_conn_id) result = hook.send_message(queue_url=self.sqs_queue,
python
{ "resource": "" }
q266153
json_response
test
def json_response(obj): """ returns a json response from a json serializable python object """ return Response( response=json.dumps(
python
{ "resource": "" }
q266154
open_maybe_zipped
test
def open_maybe_zipped(f, mode='r'): """ Opens the given file. If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive, opening the file inside the archive. :return: a file object, as in `open`, or as in `ZipFile.open`. """ _, archive, filename = ZIP_REGEX.search(f).groups() if
python
{ "resource": "" }
q266155
make_cache_key
test
def make_cache_key(*args, **kwargs): """ Used by cache to get a unique key per URL """ path = request.path args =
python
{ "resource": "" }
q266156
CloudVideoIntelligenceHook.get_conn
test
def get_conn(self): """ Returns Gcp Video Intelligence Service client :rtype: google.cloud.videointelligence_v1.VideoIntelligenceServiceClient
python
{ "resource": "" }
q266157
CloudVideoIntelligenceHook.annotate_video
test
def annotate_video( self, input_uri=None, input_content=None, features=None, video_context=None, output_uri=None, location=None, retry=None, timeout=None, metadata=None, ): """ Performs video annotation. :param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id``. :type input_uri: str :param input_content: The video data bytes. If unset, the input video(s) should be specified via ``input_uri``. If set, ``input_uri`` should be unset. :type input_content: bytes :param features: Requested video annotation features. :type features: list[google.cloud.videointelligence_v1.VideoIntelligenceServiceClient.enums.Feature] :param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id``. :type output_uri: str :param video_context: Optional, Additional video context and/or feature-specific parameters. :type video_context: dict or google.cloud.videointelligence_v1.types.VideoContext :param location: Optional, cloud region where annotation should take place. Supported cloud regions: us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined based on video file location. :type location: str
python
{ "resource": "" }
q266158
OpsgenieAlertHook._get_api_key
test
def _get_api_key(self): """ Get Opsgenie api_key for creating alert """ conn = self.get_connection(self.http_conn_id)
python
{ "resource": "" }
q266159
OpsgenieAlertHook.get_conn
test
def get_conn(self, headers=None): """ Overrides HttpHook get_conn because this hook only needs base_url and headers, and does not need generic params :param headers: additional headers to be passed through as a dictionary :type headers: dict """ conn =
python
{ "resource": "" }
q266160
OpsgenieAlertHook.execute
test
def execute(self, payload={}): """ Execute the Opsgenie Alert call :param payload: Opsgenie API Create Alert payload values See https://docs.opsgenie.com/docs/alert-api#section-create-alert
python
{ "resource": "" }
q266161
OpsgenieAlertOperator._build_opsgenie_payload
test
def _build_opsgenie_payload(self): """ Construct the Opsgenie JSON payload. All relevant parameters are combined here to a valid Opsgenie JSON payload. :return: Opsgenie payload (dict) to send """ payload = {} for key in [
python
{ "resource": "" }
q266162
OpsgenieAlertOperator.execute
test
def execute(self, context): """ Call the OpsgenieAlertHook to post message """ self.hook
python
{ "resource": "" }
q266163
AWSAthenaHook.get_conn
test
def get_conn(self): """ check if aws conn exists already or create one and return it :return: boto3 session """ if not self.conn:
python
{ "resource": "" }
q266164
AWSAthenaHook.run_query
test
def run_query(self, query, query_context, result_configuration, client_request_token=None): """ Run a Presto query on Athena with the provided config and return the submitted query_execution_id :param query: Presto query to run :type query: str :param query_context: Context in which the query needs to be run :type query_context: dict :param result_configuration: Dict with path to store results in and config related to encryption :type result_configuration: dict :param client_request_token: Unique token created by the user to avoid multiple executions of the same query :type client_request_token: str :return: str """
python
{ "resource": "" }
q266165
AWSAthenaHook.check_query_status
test
def check_query_status(self, query_execution_id): """ Fetch the status of submitted athena query. Returns None or one of valid query states. :param query_execution_id: Id of submitted athena query :type query_execution_id: str
python
{ "resource": "" }
q266166
AWSAthenaHook.poll_query_status
test
def poll_query_status(self, query_execution_id, max_tries=None): """ Poll the status of the submitted Athena query until the query state reaches a final state. Returns one of the final states :param query_execution_id: Id of submitted athena query :type query_execution_id: str :param max_tries: Number of times to poll for query state before function exits :type max_tries: int :return: str """ try_number = 1 final_query_state = None # Query state when query reaches final state or max_tries reached while True: query_state = self.check_query_status(query_execution_id) if query_state is None: self.log.info('Trial {try_number}: Invalid query state. Retrying'.format( try_number=try_number)) elif query_state in self.INTERMEDIATE_STATES: self.log.info('Trial {try_number}: Query is still in an intermediate state - {state}' .format(try_number=try_number, state=query_state))
python
{ "resource": "" }
q266167
SFTPHook.get_conn
test
def get_conn(self): """ Returns an SFTP connection object """ if self.conn is None: cnopts = pysftp.CnOpts() if self.no_host_key_check: cnopts.hostkeys = None cnopts.compression = self.compress conn_params = { 'host': self.remote_host, 'port': self.port, 'username': self.username, 'cnopts': cnopts }
python
{ "resource": "" }
q266168
ZendeskHook.__handle_rate_limit_exception
test
def __handle_rate_limit_exception(self, rate_limit_exception): """ Sleep for the time specified in the exception. If not specified, wait for 60 seconds. """ retry_after = int( rate_limit_exception.response.headers.get('Retry-After', 60))
python
{ "resource": "" }
q266169
ZendeskHook.call
test
def call(self, path, query=None, get_all_pages=True, side_loading=False): """ Call Zendesk API and return results :param path: The Zendesk API to call :param query: Query parameters :param get_all_pages: Accumulate results over all pages before returning. Due to strict rate limiting, this can often timeout. Waits for recommended period between tries after a timeout. :param side_loading: Retrieve related records as part of a single request. In order to enable side-loading, add an 'include' query parameter containing a comma-separated list of resources to load. For more information on side-loading see https://developer.zendesk.com/rest_api/docs/core/side_loading """ zendesk = self.get_conn() first_request_successful = False while not first_request_successful: try: results = zendesk.call(path, query) first_request_successful = True except RateLimitError as rle: self.__handle_rate_limit_exception(rle) # Find the key with the results keys = [path.split("/")[-1].split(".json")[0]] next_page = results['next_page'] if side_loading: keys += query['include'].split(',') results = {key: results[key] for key in keys} if get_all_pages: while next_page is not None: try: # Need to split because the next page URL has # `github.zendesk...` # in it, but the call function needs it removed. next_url = next_page.split(self.__url)[1] self.log.info("Calling %s", next_url) more_res = zendesk.call(next_url)
python
{ "resource": "" }
q266170
AwsGlueCatalogHook.get_partitions
test
def get_partitions(self, database_name, table_name, expression='', page_size=None, max_items=None): """ Retrieves the partition values for a table. :param database_name: The name of the catalog database where the partitions reside. :type database_name: str :param table_name: The name of the partitions' table. :type table_name: str :param expression: An expression filtering the partitions to be returned. Please see official AWS documentation for further information. https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions
python
{ "resource": "" }
q266171
AwsGlueCatalogHook.get_table
test
def get_table(self, database_name, table_name): """ Get the information of the table :param database_name: Name of hive database (schema) @table belongs to :type database_name: str :param table_name: Name of hive table :type table_name: str :rtype: dict >>>
python
{ "resource": "" }
q266172
AwsGlueCatalogHook.get_table_location
test
def get_table_location(self, database_name, table_name): """ Get the physical location of the table :param database_name: Name of hive database (schema) @table belongs to :type database_name: str :param table_name: Name of hive table :type
python
{ "resource": "" }
q266173
RedshiftHook.cluster_status
test
def cluster_status(self, cluster_identifier): """ Return status of a cluster :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str """ conn = self.get_conn() try: response = conn.describe_clusters(
python
{ "resource": "" }
q266174
RedshiftHook.delete_cluster
test
def delete_cluster( self, cluster_identifier, skip_final_cluster_snapshot=True, final_cluster_snapshot_identifier=''): """ Delete a cluster and optionally create a snapshot :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str :param skip_final_cluster_snapshot: determines cluster snapshot creation :type skip_final_cluster_snapshot: bool :param final_cluster_snapshot_identifier: name of final cluster snapshot
python
{ "resource": "" }
q266175
RedshiftHook.describe_cluster_snapshots
test
def describe_cluster_snapshots(self, cluster_identifier): """ Gets a list of snapshots for a cluster :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str """ response = self.get_conn().describe_cluster_snapshots( ClusterIdentifier=cluster_identifier ) if 'Snapshots' not in
python
{ "resource": "" }
q266176
RedshiftHook.restore_from_cluster_snapshot
test
def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier): """ Restores a cluster from its snapshot :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str :param snapshot_identifier: unique identifier for a snapshot of a cluster :type snapshot_identifier: str """
python
{ "resource": "" }
q266177
RedshiftHook.create_cluster_snapshot
test
def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier): """ Creates a snapshot of a cluster :param snapshot_identifier: unique identifier for a snapshot of a cluster :type snapshot_identifier: str :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str """
python
{ "resource": "" }
q266178
SlackAPIOperator.execute
test
def execute(self, **kwargs): """ SlackAPIOperator calls will not fail even if the call is unsuccessful. They should not prevent a DAG from completing successfully """ if not self.api_params: self.construct_api_call_params()
python
{ "resource": "" }
q266179
EmrHook.create_job_flow
test
def create_job_flow(self, job_flow_overrides): """ Creates a job flow using the config from the EMR connection. Keys of the json extra hash may be the arguments of the boto3 run_job_flow method. Overrides for this config may be passed as the job_flow_overrides. """ if not self.emr_conn_id:
python
{ "resource": "" }
q266180
HdfsSensor.filter_for_filesize
test
def filter_for_filesize(result, size=None): """ Will test the filepath result to check whether its size is at least self.filesize :param result: a list of dicts returned by Snakebite ls :param size: the minimum file size in MB a file must have to trigger True :return: (bool) depending on the matching criteria """ if size: log = LoggingMixin().log log.debug( 'Filtering for file size >=
python
{ "resource": "" }
q266181
HdfsSensor.filter_for_ignored_ext
test
def filter_for_ignored_ext(result, ignored_ext, ignore_copying): """ Will filter the result, if instructed to do so, removing entries that match the ignored extensions :param result: list of dicts returned by Snakebite ls :type result: list[dict] :param ignored_ext: list of ignored extensions :type ignored_ext: list :param ignore_copying: whether entries with ignored extensions should be filtered out :type ignore_copying: bool :return: list of dicts which were not removed :rtype: list[dict] """ if ignore_copying: log = LoggingMixin().log regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext)
python
{ "resource": "" }
q266182
MongoToS3Operator.execute
test
def execute(self, context): """ Executed by task_instance at runtime """ s3_conn = S3Hook(self.s3_conn_id) # Grab collection and execute query according to whether or not it is a pipeline if self.is_pipeline: results = MongoHook(self.mongo_conn_id).aggregate( mongo_collection=self.mongo_collection, aggregate_query=self.mongo_query,
python
{ "resource": "" }
q266183
get_pool
test
def get_pool(name, session=None): """Get pool by a given name.""" if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty")
python
{ "resource": "" }
q266184
create_pool
test
def create_pool(name, slots, description, session=None): """Create a pool with a given parameters.""" if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") try: slots = int(slots) except ValueError: raise AirflowBadRequest("Bad value for `slots`: %s" % slots) session.expire_on_commit = False pool = session.query(Pool).filter_by(pool=name).first() if
python
{ "resource": "" }
q266185
delete_pool
test
def delete_pool(name, session=None): """Delete pool by a given name.""" if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") pool = session.query(Pool).filter_by(pool=name).first() if pool is
python
{ "resource": "" }
q266186
GKEClusterHook._dict_to_proto
test
def _dict_to_proto(py_dict, proto): """ Converts a python dictionary to the proto supplied :param py_dict: The dictionary to convert :type py_dict: dict :param proto: The proto object to merge with dictionary :type proto: protobuf :return: A parsed python dictionary in provided proto format
python
{ "resource": "" }
q266187
GKEClusterHook.wait_for_operation
test
def wait_for_operation(self, operation, project_id=None): """ Given an operation, continuously fetches the status from Google Cloud until it either completes or an error occurs :param operation: The Operation to wait for :type operation: google.cloud.container_V1.gapic.enums.Operation :param project_id: Google Cloud Platform project ID :type project_id: str :return: A new, updated operation fetched from Google Cloud """ self.log.info("Waiting for OPERATION_NAME %s", operation.name) time.sleep(OPERATIONAL_POLL_INTERVAL) while operation.status != Operation.Status.DONE: if operation.status == Operation.Status.RUNNING or operation.status == \
python
{ "resource": "" }
q266188
GKEClusterHook.get_operation
test
def get_operation(self, operation_name, project_id=None): """ Fetches the operation from Google Cloud :param operation_name: Name of operation to fetch :type operation_name: str :param project_id: Google Cloud Platform project ID :type project_id: str :return: The new, updated operation from Google Cloud
python
{ "resource": "" }
q266189
GKEClusterHook._append_label
test
def _append_label(cluster_proto, key, val): """ Append labels to provided Cluster Protobuf Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current airflow version string follows semantic versioning spec: x.y.z). :param cluster_proto: The proto to append resource_label airflow
python
{ "resource": "" }
q266190
GKEClusterHook.create_cluster
test
def create_cluster(self, cluster, project_id=None, retry=DEFAULT, timeout=DEFAULT): """ Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. :param cluster: A Cluster protobuf or dict. If dict is provided, it must be of the same form as the protobuf message :class:`google.cloud.container_v1.types.Cluster` :type cluster: dict or google.cloud.container_v1.types.Cluster :param project_id: Google Cloud Platform project ID :type project_id: str :param retry: A retry object (``google.api_core.retry.Retry``) used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :return: The full url to the new, or existing, cluster :raises: ParseError: On JSON parsing problems when trying to convert dict AirflowException: cluster is not dict type nor Cluster proto type """ if isinstance(cluster, dict): cluster_proto = Cluster() cluster = self._dict_to_proto(py_dict=cluster, proto=cluster_proto) elif not isinstance(cluster,
python
{ "resource": "" }
q266191
GKEClusterHook.get_cluster
test
def get_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT): """ Gets details of specified cluster :param name: The name of the cluster to retrieve :type name: str :param project_id: Google Cloud Platform project ID :type project_id: str :param retry: A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :return: google.cloud.container_v1.types.Cluster
python
{ "resource": "" }
q266192
DiscordWebhookHook._get_webhook_endpoint
test
def _get_webhook_endpoint(self, http_conn_id, webhook_endpoint): """ Given a Discord http_conn_id, return the default webhook endpoint or override if a webhook_endpoint is manually supplied. :param http_conn_id: The provided connection ID :param webhook_endpoint: The manually provided webhook endpoint :return: Webhook endpoint (str) to use """ if webhook_endpoint: endpoint = webhook_endpoint elif http_conn_id: conn = self.get_connection(http_conn_id) extra = conn.extra_dejson endpoint = extra.get('webhook_endpoint', '') else:
python
{ "resource": "" }
q266193
DiscordWebhookHook._build_discord_payload
test
def _build_discord_payload(self): """ Construct the Discord JSON payload. All relevant parameters are combined here to a valid Discord JSON payload. :return: Discord payload (str) to send """ payload = {} if self.username: payload['username'] = self.username if self.avatar_url: payload['avatar_url'] = self.avatar_url payload['tts'] = self.tts if len(self.message) <= 2000:
python
{ "resource": "" }
q266194
DiscordWebhookHook.execute
test
def execute(self): """ Execute the Discord webhook call """ proxies = {} if self.proxy: # we only need https proxy for Discord proxies = {'https':
python
{ "resource": "" }
q266195
GoogleCloudKMSHook.encrypt
test
def encrypt(self, key_name, plaintext, authenticated_data=None): """ Encrypts a plaintext message using Google Cloud KMS. :param key_name: The Resource Name for the key (or key version) to be used for encryption. Of the form ``projects/*/locations/*/keyRings/*/cryptoKeys/**`` :type key_name: str :param plaintext: The message to be encrypted. :type plaintext: bytes :param authenticated_data: Optional additional authenticated data that must also be provided to decrypt the message. :type authenticated_data: bytes :return: The base 64 encoded ciphertext
python
{ "resource": "" }
q266196
SqoopHook.import_table
test
def import_table(self, table, target_dir=None, append=False, file_type="text", columns=None, split_by=None, where=None, direct=False, driver=None, extra_import_options=None): """ Imports table from remote location to target dir. Arguments are copies of direct sqoop command line arguments :param table: Table to read :param target_dir: HDFS destination dir :param append: Append data to an existing dataset in HDFS :param file_type: "avro", "sequence", "text" or "parquet". Imports data into the specified format. Defaults to text. :param columns: <col,col,col…> Columns to import from table :param split_by: Column of the table used to split work units :param where: WHERE clause to use during import :param direct: Use direct connector if exists for the
python
{ "resource": "" }
q266197
SqoopHook.import_query
test
def import_query(self, query, target_dir, append=False, file_type="text", split_by=None, direct=None, driver=None, extra_import_options=None): """ Imports a specific query from the rdbms to hdfs :param query: Free format query to run :param target_dir: HDFS destination dir :param append: Append data to an existing dataset in HDFS :param file_type: "avro", "sequence", "text" or "parquet" Imports data to hdfs into the specified format. Defaults to text. :param split_by: Column of the table used to split work units :param direct: Use direct import fast path :param driver: Manually specify JDBC driver class to
python
{ "resource": "" }
q266198
SqoopHook.export_table
test
def export_table(self, table, export_dir, input_null_string, input_null_non_string, staging_table, clear_staging_table, enclosed_by, escaped_by, input_fields_terminated_by, input_lines_terminated_by, input_optionally_enclosed_by, batch, relaxed_isolation, extra_export_options=None): """ Exports Hive table to remote location. Arguments are copies of direct sqoop command line Arguments :param table: Table remote destination :param export_dir: Hive table to export :param input_null_string: The string to be interpreted as null for string columns :param input_null_non_string: The string to be interpreted as null for non-string columns :param staging_table: The table in which data will be staged before being inserted into the destination table :param clear_staging_table: Indicate that any data present in the staging table can be deleted :param enclosed_by: Sets a required field enclosing character :param escaped_by: Sets the escape character :param input_fields_terminated_by: Sets the field separator character :param input_lines_terminated_by: Sets the end-of-line character :param input_optionally_enclosed_by: Sets a field enclosing character :param batch: Use batch mode for underlying statement execution :param relaxed_isolation: Transaction isolation to read uncommitted
python
{ "resource": "" }
q266199
GCPTextToSpeechHook.get_conn
test
def get_conn(self): """ Retrieves connection to Cloud Text to Speech. :return: Google Cloud Text to Speech client object.
python
{ "resource": "" }