| _id (string) | title (string) | partition (string) | text (string) | language (string) | meta_information (dict) |
|---|---|---|---|---|---|
q266000 | S3Hook.load_file | test | def load_file(self,
filename,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
| python | {
"resource": ""
} |
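A hedged usage sketch for the row above: it assumes an Airflow 1.10-style import path and a hypothetical `aws_default` connection; the file, key, and bucket names are placeholders, not values from the dataset.

```python
# Minimal sketch, assuming a configured 'aws_default' connection;
# the local path, key, and bucket are hypothetical.
from airflow.hooks.S3_hook import S3Hook

s3 = S3Hook(aws_conn_id='aws_default')
s3.load_file(
    filename='/tmp/report.csv',    # local file to upload
    key='reports/report.csv',      # S3 key the file will live under
    bucket_name='my-data-bucket',
    replace=True,                  # overwrite the key if it already exists
    encrypt=False,
)
```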
q266001 | S3Hook.load_string | test | def load_string(self,
string_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
encoding='utf-8'):
"""
Loads a string to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param string_data: str to set as content for the key.
:type string_data: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to | python | {
"resource": ""
} |
q266002 | S3Hook.load_bytes | test | def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads bytes to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
| python | {
"resource": ""
} |
q266003 | S3Hook.load_file_obj | test | def load_file_obj(self,
file_obj,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads a file object to S3
:param file_obj: The file-like object to set as the content for the S3 key.
:type file_obj: file-like object
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str | python | {
"resource": ""
} |
q266004 | S3Hook.copy_object | test | def copy_object(self,
source_bucket_key,
dest_bucket_key,
source_bucket_name=None,
dest_bucket_name=None,
source_version_id=None):
"""
Creates a copy of an object that is already stored in S3.
Note: the S3 connection used here needs to have access to both
source and destination bucket/key.
:param source_bucket_key: The key of the source object.
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit source_bucket_name.
:type source_bucket_key: str
:param dest_bucket_key: The key of the object to copy to.
The convention to specify `dest_bucket_key` is the same
as `source_bucket_key`.
:type dest_bucket_key: str
:param source_bucket_name: Name of the S3 bucket in which the source object is stored.
It should be omitted when `source_bucket_key` is provided as a full s3:// url.
:type source_bucket_name: str
:param dest_bucket_name: Name of the S3 bucket to which the object is copied.
It should be omitted when `dest_bucket_key` is provided as a full s3:// url.
:type dest_bucket_name: str
:param source_version_id: Version ID of the source object (OPTIONAL)
:type source_version_id: str
"""
if dest_bucket_name is None:
dest_bucket_name, dest_bucket_key = self.parse_s3_url(dest_bucket_key)
else:
parsed_url = urlparse(dest_bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If dest_bucket_name is provided, ' +
'dest_bucket_key should be relative path ' +
'from root level, rather than a full s3:// url')
if source_bucket_name is None:
| python | {
"resource": ""
} |
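Based on the addressing conventions spelled out in the `copy_object` docstring above, a sketch of the two equivalent call styles; the connection id, buckets, and keys are hypothetical.

```python
# Minimal sketch of the two conventions described above; names are hypothetical.
from airflow.hooks.S3_hook import S3Hook

s3 = S3Hook(aws_conn_id='aws_default')

# 1) Full s3:// URLs -- the bucket-name arguments must then be omitted.
s3.copy_object(
    source_bucket_key='s3://source-bucket/path/to/file.csv',
    dest_bucket_key='s3://dest-bucket/path/to/file.csv',
)

# 2) Relative keys plus explicit bucket names.
s3.copy_object(
    source_bucket_key='path/to/file.csv',
    dest_bucket_key='path/to/file.csv',
    source_bucket_name='source-bucket',
    dest_bucket_name='dest-bucket',
)
```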
q266005 | CassandraToGoogleCloudStorageOperator._query_cassandra | test | def _query_cassandra(self):
"""
Queries Cassandra and returns a cursor to the results.
"""
| python | {
"resource": ""
} |
q266006 | CassandraToGoogleCloudStorageOperator.convert_user_type | test | def convert_user_type(cls, name, value):
"""
Converts a user type to RECORD that contains n fields, where n is the
number of attributes. Each element in the user type class will be converted to its
corresponding data type in BQ.
"""
| python | {
"resource": ""
} |
q266007 | send_email | test | def send_email(to, subject, html_content, files=None, dryrun=False, cc=None,
bcc=None, mime_subtype='mixed', sandbox_mode=False, **kwargs):
"""
Send an email with html content using sendgrid.
To use this plugin:
0. include sendgrid subpackage as part of your Airflow installation, e.g.,
pip install 'apache-airflow[sendgrid]'
1. update [email] backend in airflow.cfg, i.e.,
[email]
email_backend = airflow.contrib.utils.sendgrid.send_email
2. configure Sendgrid specific environment variables at all Airflow instances:
SENDGRID_MAIL_FROM={your-mail-from}
SENDGRID_API_KEY={your-sendgrid-api-key}.
"""
if files is None:
files = []
mail = Mail()
from_email = kwargs.get('from_email') or os.environ.get('SENDGRID_MAIL_FROM')
from_name = kwargs.get('from_name') or os.environ.get('SENDGRID_MAIL_SENDER')
mail.from_email = Email(from_email, from_name)
mail.subject = subject
mail.mail_settings = MailSettings()
if sandbox_mode:
mail.mail_settings.sandbox_mode = SandBoxMode(enable=True)
# Add the recipient list of to emails.
personalization = Personalization()
to = get_email_address_list(to)
for to_address in to:
personalization.add_to(Email(to_address))
if cc:
cc = get_email_address_list(cc)
for cc_address in cc:
personalization.add_cc(Email(cc_address))
if bcc:
bcc = get_email_address_list(bcc)
for bcc_address in bcc:
personalization.add_bcc(Email(bcc_address))
# Add custom_args to personalization if present
| python | {
"resource": ""
} |
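The setup steps listed in the docstring above translate roughly to the sketch below; the sender address, recipient, and API key are placeholders, and it assumes the sendgrid extra is installed and the `[email]` backend in airflow.cfg points at this module.

```python
# Minimal sketch, assuming `pip install 'apache-airflow[sendgrid]'` and
# email_backend = airflow.contrib.utils.sendgrid.send_email in airflow.cfg.
import os
from airflow.contrib.utils.sendgrid import send_email

os.environ['SENDGRID_MAIL_FROM'] = 'alerts@example.com'       # placeholder
os.environ['SENDGRID_API_KEY'] = '<your-sendgrid-api-key>'    # placeholder

send_email(
    to=['user@example.com'],
    subject='Daily report',
    html_content='<b>All tasks succeeded</b>',
    sandbox_mode=True,   # keep SendGrid from actually delivering while testing
)
```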
q266008 | GCPSpeechToTextHook.get_conn | test | def get_conn(self):
"""
Retrieves connection to Cloud Speech.
:return: Google Cloud Speech client object.
| python | {
"resource": ""
} |
q266009 | GCPSpeechToTextHook.recognize_speech | test | def recognize_speech(self, config, audio, retry=None, timeout=None):
"""
Recognizes audio input
:param config: information to the recognizer that specifies how to process the request.
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:type config: dict or google.cloud.speech_v1.types.RecognitionConfig
:param audio: audio data to be recognized
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
:type audio: dict or google.cloud.speech_v1.types.RecognitionAudio
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: | python | {
"resource": ""
} |
q266010 | SparkSqlOperator.execute | test | def execute(self, context):
"""
Call the SparkSqlHook to run the provided sql query
"""
self._hook = SparkSqlHook(sql=self._sql,
conf=self._conf,
conn_id=self._conn_id,
total_executor_cores=self._total_executor_cores,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
keytab=self._keytab,
principal=self._principal,
| python | {
"resource": ""
} |
q266011 | load_entrypoint_plugins | test | def load_entrypoint_plugins(entry_points, airflow_plugins):
"""
Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin]
"""
| python | {
"resource": ""
} |
q266012 | is_valid_plugin | test | def is_valid_plugin(plugin_obj, existing_plugins):
"""
Check whether a potential object is a subclass of
the AirflowPlugin class.
:param plugin_obj: potential subclass of AirflowPlugin
:param existing_plugins: Existing list of AirflowPlugin subclasses
:return: Whether or not the obj is a valid subclass of
AirflowPlugin
"""
| python | {
"resource": ""
} |
q266013 | SkipMixin.skip | test | def skip(self, dag_run, execution_date, tasks, session=None):
"""
Sets task instances to skipped from the same dag run.
:param dag_run: the DagRun for which to set the tasks to skipped
:param execution_date: execution_date
:param tasks: tasks to skip (not task_ids)
:param session: db session to use
"""
if not tasks:
return
task_ids = [d.task_id for d in tasks]
now = timezone.utcnow()
if dag_run:
session.query(TaskInstance).filter(
TaskInstance.dag_id == dag_run.dag_id,
TaskInstance.execution_date == dag_run.execution_date,
TaskInstance.task_id.in_(task_ids)
).update({TaskInstance.state: State.SKIPPED,
TaskInstance.start_date: now,
TaskInstance.end_date: now},
synchronize_session=False)
session.commit()
else:
| python | {
"resource": ""
} |
q266014 | AzureDataLakeHook.get_conn | test | def get_conn(self):
"""Return a AzureDLFileSystem object."""
conn = self.get_connection(self.conn_id)
service_options = conn.extra_dejson
self.account_name = service_options.get('account_name')
adlCreds = lib.auth(tenant_id=service_options.get('tenant'),
| python | {
"resource": ""
} |
q266015 | AzureDataLakeHook.check_for_file | test | def check_for_file(self, file_path):
"""
Check if a file exists on Azure Data Lake.
:param file_path: Path and name of the file.
:type file_path: str
:return: True if the file exists, False otherwise.
:rtype: bool
"""
| python | {
"resource": ""
} |
q266016 | AzureDataLakeHook.upload_file | test | def upload_file(self, local_path, remote_path, nthreads=64, overwrite=True,
buffersize=4194304, blocksize=4194304):
"""
Upload a file to Azure Data Lake.
:param local_path: local path. Can be single file, directory (in which case,
upload recursively) or glob pattern. Recursive glob patterns using `**`
are not supported.
:type local_path: str
:param remote_path: Remote path to upload to; if multiple files, this is the
directory root to write within.
:type remote_path: str
:param nthreads: Number of threads to use. If None, uses the number of cores.
:type nthreads: int
:param overwrite: Whether to forcibly overwrite existing files/directories.
If False and the remote path is a directory, the upload will quit regardless
of whether any files would be overwritten. If True, only matching filenames
are actually overwritten.
:type overwrite: bool
:param buffersize: int [2**22]
Number of bytes for internal buffer. This block cannot be bigger than
a chunk and cannot be smaller than a block.
:type buffersize: | python | {
"resource": ""
} |
q266017 | AzureDataLakeHook.list | test | def list(self, path):
"""
List files in Azure Data Lake Storage
:param path: full path/globstring to use to list files in ADLS
:type path: str
"""
if "*" in | python | {
"resource": ""
} |
q266018 | AWSAthenaOperator.execute | test | def execute(self, context):
"""
Run Presto Query on Athena
"""
self.hook = self.get_hook()
self.hook.get_conn()
self.query_execution_context['Database'] = self.database
self.result_configuration['OutputLocation'] = self.output_location
self.query_execution_id = self.hook.run_query(self.query, self.query_execution_context,
self.result_configuration, self.client_request_token)
query_status = self.hook.poll_query_status(self.query_execution_id, self.max_tries)
if query_status in AWSAthenaHook.FAILURE_STATES:
raise Exception(
'Final state of Athena job is {}, query_execution_id is {}.'
| python | {
"resource": ""
} |
q266019 | uncompress_file | test | def uncompress_file(input_file_name, file_extension, dest_dir):
"""
Uncompress gz and bz2 files
"""
if file_extension.lower() not in ('.gz', '.bz2'):
raise NotImplementedError("Received {} format. Only gz and bz2 "
"files can currently be uncompressed."
| python | {
"resource": ""
} |
q266020 | MsSqlToGoogleCloudStorageOperator._query_mssql | test | def _query_mssql(self):
"""
Queries MSSQL and returns a cursor of results.
:return: mssql cursor
"""
mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
| python | {
"resource": ""
} |
q266021 | action_logging | test | def action_logging(f):
"""
Decorates a function so that, when executed in a CLI context, action logging
is submitted alongside the function call. It will call the action logger
callbacks twice, once for pre-execution and once for post-execution.
Action logger will be called with below keyword parameters:
sub_command : name of sub-command
start_datetime : start datetime instance by utc
end_datetime : end datetime instance by utc
full_command : full command line arguments
user : current user
log : airflow.models.log.Log ORM instance
dag_id : dag id (optional)
task_id : task_id (optional)
execution_date : execution date (optional)
error : exception instance if there's an exception
:param f: function instance
:return: wrapped function
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
A wrapper for CLI functions. It assumes a Namespace instance is passed
as the 1st positional argument.
:param args: Positional arguments; a Namespace instance is expected
as the 1st positional argument
:param kwargs: Passthrough keyword arguments
"""
| python | {
"resource": ""
} |
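A sketch of how a CLI function might be wrapped by this decorator; it assumes the decorator is importable from airflow.utils.cli and, as the docstring states, that the first positional argument is an argparse Namespace. The subcommand itself is hypothetical.

```python
# Hypothetical CLI subcommand; the only assumption the decorator makes is
# that the first positional argument is an argparse Namespace.
from airflow.utils.cli import action_logging

@action_logging
def list_tasks(args):
    print("listing tasks for dag_id:", args.dag_id)
```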
q266022 | _build_metrics | test | def _build_metrics(func_name, namespace):
"""
Builds metrics dict from function args
It assumes that the function arguments come from an airflow.bin.cli module
function and include a Namespace instance that optionally contains "dag_id",
"task_id", and "execution_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics
"""
metrics = {'sub_command': func_name, 'start_datetime': datetime.utcnow(),
'full_command': '{}'.format(list(sys.argv)), 'user': getpass.getuser()}
assert isinstance(namespace, Namespace)
tmp_dic = vars(namespace)
metrics['dag_id'] = tmp_dic.get('dag_id')
metrics['task_id'] | python | {
"resource": ""
} |
q266023 | CgroupTaskRunner._create_cgroup | test | def _create_cgroup(self, path):
"""
Create the specified cgroup.
:param path: The path of the cgroup to create.
E.g. cpu/mygroup/mysubgroup
:return: the Node associated with the created cgroup.
:rtype: cgroupspy.nodes.Node
"""
node = trees.Tree().root
path_split = path.split(os.sep)
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.debug("Creating cgroup %s in %s", path_element, node.path)
| python | {
"resource": ""
} |
q266024 | CgroupTaskRunner._delete_cgroup | test | def _delete_cgroup(self, path):
"""
Delete the specified cgroup.
:param path: The path of the cgroup to delete.
E.g. cpu/mygroup/mysubgroup
"""
node = trees.Tree().root
path_split = path.split("/")
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.warning("Cgroup does not exist: %s", path)
| python | {
"resource": ""
} |
q266025 | DatabricksHook._parse_host | test | def _parse_host(host):
"""
The purpose of this function is to be robust to improper connection
settings provided by users, specifically in the host field.
For example -- when users supply ``https://xx.cloud.databricks.com`` as the
host, we must strip out the protocol to get the host.::
h = DatabricksHook()
assert h._parse_host('https://xx.cloud.databricks.com') == \
| python | {
"resource": ""
} |
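A sketch of the behaviour described in the `_parse_host` docstring above (not a reproduction of the elided doctest); it assumes a DatabricksHook can be constructed against a configured default connection.

```python
# Sketch: a full URL is reduced to its host, a bare host passes through unchanged.
from airflow.contrib.hooks.databricks_hook import DatabricksHook

h = DatabricksHook()  # assumes a configured databricks_default connection
assert h._parse_host('https://xx.cloud.databricks.com') == 'xx.cloud.databricks.com'
assert h._parse_host('xx.cloud.databricks.com') == 'xx.cloud.databricks.com'
```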
q266026 | DatabricksHook._do_api_call | test | def _do_api_call(self, endpoint_info, json):
"""
Utility function to perform an API call with retries
:param endpoint_info: Tuple of method and endpoint
:type endpoint_info: tuple[string, string]
:param json: Parameters for this API call.
:type json: dict
:return: If the API call returns an OK status code,
this function returns the response in JSON. Otherwise,
we throw an AirflowException.
:rtype: dict
"""
method, endpoint = endpoint_info
url = 'https://{host}/{endpoint}'.format(
host=self._parse_host(self.databricks_conn.host),
endpoint=endpoint)
if 'token' in self.databricks_conn.extra_dejson:
self.log.info('Using token auth.')
auth = _TokenAuth(self.databricks_conn.extra_dejson['token'])
else:
self.log.info('Using basic auth.')
auth = (self.databricks_conn.login, self.databricks_conn.password)
if method == 'GET':
request_func = requests.get
elif method == 'POST':
request_func = requests.post
else:
raise AirflowException('Unexpected HTTP Method: ' + method)
attempt_num = 1
while True:
try:
response = request_func(
url,
json=json,
auth=auth,
headers=USER_AGENT_HEADER,
timeout=self.timeout_seconds)
response.raise_for_status()
return response.json() | python | {
"resource": ""
} |
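A sketch of how `_do_api_call` might be invoked; it assumes an initialised DatabricksHook and targets the Databricks 2.0 runs/get endpoint with a hypothetical run id.

```python
# Minimal sketch; the endpoint tuple is (HTTP method, path) as documented above.
from airflow.contrib.hooks.databricks_hook import DatabricksHook

hook = DatabricksHook()  # assumes a configured databricks_default connection
GET_RUN_ENDPOINT = ('GET', 'api/2.0/jobs/runs/get')
response = hook._do_api_call(GET_RUN_ENDPOINT, {'run_id': 42})  # run id is hypothetical
print(response.get('state'))
```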
q266027 | SalesforceHook.get_conn | test | def get_conn(self):
"""
Sign into Salesforce, only if we are not already signed in.
"""
if not self.conn:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
| python | {
"resource": ""
} |
q266028 | SalesforceHook.make_query | test | def make_query(self, query):
"""
Make a query to Salesforce.
:param query: The query to make to Salesforce.
:type query: str
:return: The query result.
:rtype: dict
"""
conn = self.get_conn()
self.log.info("Querying for all objects")
| python | {
"resource": ""
} |
q266029 | SalesforceHook.describe_object | test | def describe_object(self, obj):
"""
Get the description of an object from Salesforce.
This description is the object's schema and
some extra metadata that Salesforce stores for each object.
:param obj: The name of the Salesforce object that we are getting a description of.
:type | python | {
"resource": ""
} |
q266030 | SalesforceHook.get_available_fields | test | def get_available_fields(self, obj):
"""
Get a list of all available fields for an object.
:param obj: The name of the Salesforce object that we are getting a description of.
:type obj: str
:return: the names of the fields.
| python | {
"resource": ""
} |
q266031 | SalesforceHook.get_object_from_salesforce | test | def get_object_from_salesforce(self, obj, fields):
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
:param obj: The object name to get from Salesforce.
:type obj: str
:param fields: The fields to get from the object.
:type fields: iterable
:return: all instances of the object from Salesforce.
:rtype: dict
| python | {
"resource": ""
} |
q266032 | SalesforceHook._to_timestamp | test | def _to_timestamp(cls, column):
"""
Convert a column of a dataframe to UNIX timestamps if applicable
:param column: A Series object representing a column of a dataframe.
:type column: pd.Series
:return: a new series that maintains the same index as the original
:rtype: pd.Series
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
column = pd.to_datetime(column)
except ValueError:
log = LoggingMixin().log
log.warning("Could not convert field to timestamps: %s", column.name)
| python | {
"resource": ""
} |
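The comments above explain why a blanket `pd.to_datetime` pass is risky; the self-contained pandas sketch below shows the safe case the helper targets, a string column with four-digit years, plus an explicit conversion to epoch seconds.

```python
# Self-contained sketch of the conversion performed by _to_timestamp;
# columns that fail to parse would be returned untouched.
import pandas as pd

col = pd.Series(['2019-01-01', '2019-06-15'], name='created_date')
converted = pd.to_datetime(col)  # parses because a four-digit year is present
epoch_seconds = (converted - pd.Timestamp('1970-01-01')) // pd.Timedelta('1s')
print(epoch_seconds.tolist())
```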
q266033 | SalesforceHook.write_object_to_file | test | def write_object_to_file(self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False):
"""
Write query results to file.
Acceptable formats are:
- csv:
comma-separated-values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
JSON array but each element is new-line delimited instead of comma delimited like in `json`
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.
By default, this function will try and leave all values as they are represented in Salesforce.
You can use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
This can be greatly beneficial as it will make all of your datetime fields look the same,
and makes it easier to work with in other database environments
:param query_results: the results from a SQL query
:type query_results: list of dict
:param filename: the name of the file where the data should be dumped to
:type filename: str
:param fmt: the format you want the output in. Default: 'csv'
:type fmt: str
:param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
False if you want them to be left in the same format as they were in Salesforce.
Leaving the value as False will result in datetimes being strings. Default: False
:type coerce_to_timestamp: bool
:param record_time_added: True if you want to add a Unix timestamp field
to the resulting data that marks when the data was fetched from Salesforce. Default: False
:type record_time_added: bool
:return: the dataframe that gets written to the file.
:rtype: pd.Dataframe
"""
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {}".format(fmt))
# this line right here will convert all integers to floats
# if there are any None/np.nan values in the column
# that's because None/np.nan cannot exist in an integer column
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [column.lower() for column in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
# features that are DATE or DATETIME
if coerce_to_timestamp and df.shape[0] > 0:
# get the object | python | {
"resource": ""
} |
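A hedged sketch tying the Salesforce helpers in this section together; the connection id, object name, and output path are hypothetical, and the `['records']` key follows simple_salesforce's query result shape rather than anything stated in the row above.

```python
# Minimal sketch, assuming a configured 'salesforce_default' connection;
# the object name and output path are hypothetical.
from airflow.contrib.hooks.salesforce_hook import SalesforceHook

sf = SalesforceHook(conn_id='salesforce_default')
fields = sf.get_available_fields('Lead')
results = sf.get_object_from_salesforce('Lead', fields)
sf.write_object_to_file(
    query_results=results['records'],
    filename='/tmp/leads.csv',
    fmt='csv',
    coerce_to_timestamp=True,  # normalise datetime fields to Unix timestamps (UTC)
)
```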
q266034 | MongoHook.get_conn | test | def get_conn(self):
"""
Fetches PyMongo Client
"""
if self.client is not None:
return self.client
# Mongo Connection Options dict that is unpacked when passed to MongoClient
options = self.extras
# If we are using SSL disable requiring certs from specific hostname
| python | {
"resource": ""
} |
q266035 | MongoHook.get_collection | test | def get_collection(self, mongo_collection, mongo_db=None):
"""
Fetches a mongo collection object for querying.
Uses connection schema as DB | python | {
"resource": ""
} |
q266036 | MongoHook.replace_many | test | def replace_many(self, mongo_collection, docs,
filter_docs=None, mongo_db=None, upsert=False, collation=None,
**kwargs):
"""
Replaces many documents in a mongo collection.
Uses bulk_write with multiple ReplaceOne operations
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
.. note::
If no ``filter_docs`` are given, it is assumed that all
replacement documents contain the ``_id`` field which are then
used as filters.
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param docs: The new documents.
:type docs: list[dict]
:param filter_docs: A list of queries that match the documents to replace.
Can be omitted; then the _id fields from docs will be used.
:type filter_docs: list[dict]
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
:param upsert: If ``True``, perform an insert if no documents
match the filters for the replace operation.
:type upsert: bool | python | {
"resource": ""
} |
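A sketch of the "_id as filter" convention described in the note above; it assumes a configured `mongo_default` connection, and the database, collection, and documents are hypothetical.

```python
# Sketch: with no filter_docs, each document's _id is used as its filter.
from airflow.contrib.hooks.mongo_hook import MongoHook

hook = MongoHook(conn_id='mongo_default')
docs = [
    {'_id': 1, 'name': 'alice', 'active': True},
    {'_id': 2, 'name': 'bob', 'active': False},
]
hook.replace_many(
    mongo_collection='users',
    docs=docs,
    mongo_db='app',
    upsert=True,  # insert documents whose _id matches nothing yet
)
```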
q266037 | ImapHook.has_mail_attachment | test | def has_mail_attachment(self, name, mail_folder='INBOX', check_regex=False):
"""
Checks the mail folder for mails containing attachments with the given name.
:param name: The name of the attachment that will be searched for.
:type name: str
:param mail_folder: The mail folder where to look at.
:type mail_folder: str
:param check_regex: Checks the name for a regular expression.
:type check_regex: bool
:returns: True if there is an attachment with the given name and False if not.
:rtype: bool
"""
mail_attachments = self._retrieve_mails_attachments_by_name(name,
| python | {
"resource": ""
} |
q266038 | ImapHook.retrieve_mail_attachments | test | def retrieve_mail_attachments(self,
name,
mail_folder='INBOX',
check_regex=False,
latest_only=False,
not_found_mode='raise'):
"""
Retrieves mail's attachments in the mail folder by its name.
:param name: The name of the attachment that will be downloaded.
:type name: str
:param mail_folder: The mail folder where to look at.
:type mail_folder: str
:param check_regex: Checks the name for a regular expression.
:type check_regex: bool
:param latest_only: If set to True it will only retrieve
the first matched attachment.
:type latest_only: bool
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
| python | {
"resource": ""
} |
q266039 | ImapHook.download_mail_attachments | test | def download_mail_attachments(self,
name,
local_output_directory,
mail_folder='INBOX',
check_regex=False,
latest_only=False,
not_found_mode='raise'):
"""
Downloads mail's attachments in the mail folder by its name to the local directory.
:param name: The name of the attachment that will be downloaded.
:type name: str
:param local_output_directory: The output directory on the local machine
where the files will be downloaded to.
:type local_output_directory: str
:param mail_folder: The mail folder where to look at.
:type mail_folder: str
:param check_regex: Checks the name for a regular expression.
:type check_regex: bool
:param latest_only: If set to True it will only download
the first matched attachment.
:type latest_only: bool
:param not_found_mode: Specify what should happen if no attachment has been found.
| python | {
"resource": ""
} |
q266040 | Mail.get_attachments_by_name | test | def get_attachments_by_name(self, name, check_regex, find_first=False):
"""
Gets all attachments by name for the mail.
:param name: The name of the attachment to look for.
:type name: str
:param check_regex: Checks the name for a regular expression.
:type check_regex: bool
:param find_first: If set to True it will only find the first match and then quit.
:type find_first: bool
:returns: a list of tuples each containing name and payload
| python | {
"resource": ""
} |
q266041 | MailPart.get_file | test | def get_file(self):
"""
Gets the file including name and payload.
:returns: the part's name and payload.
:rtype: tuple
| python | {
"resource": ""
} |
q266042 | AwsFirehoseHook.put_records | test | def put_records(self, records):
"""
Write batch records to Kinesis Firehose
"""
firehose_conn = self.get_conn()
response = firehose_conn.put_record_batch(
| python | {
"resource": ""
} |
q266043 | ReadyToRescheduleDep._get_dep_statuses | test | def _get_dep_statuses(self, ti, session, dep_context):
"""
Determines whether a task is ready to be rescheduled. Only tasks in
NONE state with at least one row in task_reschedule table are
handled by this dependency class, otherwise this dependency is
considered passed. This dependency fails if the latest reschedule
request's reschedule date is still in the future.
"""
if dep_context.ignore_in_reschedule_period:
yield self._passing_status(
reason="The context specified that being in a reschedule period was "
"permitted.")
return
if ti.state not in self.RESCHEDULEABLE_STATES:
yield self._passing_status(
reason="The task instance is not in State_UP_FOR_RESCHEDULE or NONE state.")
return
task_reschedules = TaskReschedule.find_for_task_instance(task_instance=ti)
if not task_reschedules:
yield self._passing_status(
reason="There is no reschedule request for this task instance.")
return
now = timezone.utcnow()
| python | {
"resource": ""
} |
q266044 | send_email | test | def send_email(to, subject, html_content,
files=None, dryrun=False, cc=None, bcc=None,
mime_subtype='mixed', mime_charset='utf-8', **kwargs):
"""
Send email using backend specified in EMAIL_BACKEND.
"""
path, attr = configuration.conf.get('email', 'EMAIL_BACKEND').rsplit('.', 1) | python | {
"resource": ""
} |
q266045 | send_email_smtp | test | def send_email_smtp(to, subject, html_content, files=None,
dryrun=False, cc=None, bcc=None,
mime_subtype='mixed', mime_charset='utf-8',
**kwargs):
"""
Send an email with html content
>>> send_email('test@example.com', 'foo', '<b>Foo</b> bar', ['/dev/null'], dryrun=True)
"""
smtp_mail_from = configuration.conf.get('smtp', 'SMTP_MAIL_FROM')
to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg['Subject'] = subject
msg['From'] = smtp_mail_from
msg['To'] = ", ".join(to)
recipients = to
if cc:
cc = get_email_address_list(cc)
msg['CC'] = ", ".join(cc)
recipients = recipients + cc
if bcc:
# don't add bcc in header | python | {
"resource": ""
} |
q266046 | UtcDateTime.process_result_value | test | def process_result_value(self, value, dialect):
"""
Processes DateTimes from the DB, making sure they are always
returned as UTC. Not using timezone.convert_to_utc as that
converts to configured TIMEZONE while the DB might be
running with some other setting. We assume UTC datetimes
in the database.
| python | {
"resource": ""
} |
q266047 | WasbHook.check_for_blob | test | def check_for_blob(self, container_name, blob_name, **kwargs):
"""
Check if a blob exists on Azure Blob Storage.
:param container_name: Name of the container.
:type container_name: str
:param blob_name: Name of the blob.
:type blob_name: str
:param kwargs: Optional keyword arguments that
| python | {
"resource": ""
} |
q266048 | WasbHook.check_for_prefix | test | def check_for_prefix(self, container_name, prefix, **kwargs):
"""
Check if a prefix exists on Azure Blob storage.
:param container_name: Name of the container.
:type container_name: str
:param prefix: Prefix of the blob.
:type prefix: str
:param kwargs: Optional keyword arguments that
`BlockBlobService.list_blobs()` takes.
| python | {
"resource": ""
} |
q266049 | WasbHook.load_string | test | def load_string(self, string_data, container_name, blob_name, **kwargs):
"""
Upload a string to Azure Blob Storage.
:param string_data: String to load.
:type string_data: str
:param container_name: Name of the container.
:type container_name: str
| python | {
"resource": ""
} |
q266050 | WasbHook.read_file | test | def read_file(self, container_name, blob_name, **kwargs):
"""
Read a file from Azure Blob Storage and return as a string.
:param container_name: Name of the container.
:type container_name: str
:param blob_name: Name of the blob.
:type blob_name: str
:param kwargs: Optional keyword arguments that
| python | {
"resource": ""
} |
q266051 | WasbHook.delete_file | test | def delete_file(self, container_name, blob_name, is_prefix=False,
ignore_if_missing=False, **kwargs):
"""
Delete a file from Azure Blob Storage.
:param container_name: Name of the container.
:type container_name: str
:param blob_name: Name of the blob.
:type blob_name: str
:param is_prefix: If blob_name is a prefix, delete all matching files
:type is_prefix: bool
:param ignore_if_missing: if True, then return success even if the
blob does not exist.
:type ignore_if_missing: bool
:param kwargs: Optional keyword arguments that
`BlockBlobService.delete_blob()` takes.
:type kwargs: object
"""
if is_prefix:
blobs_to_delete = [
blob.name for blob in self.connection.list_blobs(
container_name, prefix=blob_name, **kwargs
)
]
elif self.check_for_blob(container_name, | python | {
"resource": ""
} |
q266052 | mlsd | test | def mlsd(conn, path="", facts=None):
"""
BACKPORT FROM PYTHON3 FTPLIB.
List a directory in a standardized format by using MLSD
command (RFC-3659). If path is omitted the current directory
is assumed. "facts" is a list of strings representing the type
of information desired (e.g. ["type", "size", "perm"]).
Return a generator object yielding a tuple of two elements
for every file found in path.
First element is the file name, the second one is a dictionary
including a variable number of "facts" depending on the server
and whether "facts" argument has been provided.
"""
facts = facts or []
if facts:
conn.sendcmd("OPTS MLST " + ";".join(facts) + ";")
| python | {
"resource": ""
} |
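A sketch of consuming the generator the backported `mlsd` yields; the FTP host, credentials, and path are placeholders, and it assumes the server supports the MLSD command.

```python
# Sketch: iterate (name, facts) pairs for a remote directory.
import ftplib

conn = ftplib.FTP('ftp.example.com', 'user', 'password')  # placeholders
for name, facts in mlsd(conn, path='/data', facts=['type', 'size']):
    if facts.get('type') == 'file':
        print(name, facts.get('size'))
```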
q266053 | FTPHook.get_conn | test | def get_conn(self):
"""
Returns an FTP connection object
"""
if self.conn is None:
params = self.get_connection(self.ftp_conn_id)
pasv = params.extra_dejson.get("passive", True)
| python | {
"resource": ""
} |
q266054 | FTPHook.list_directory | test | def list_directory(self, path, nlst=False):
"""
Returns a list of files on the remote system.
:param path: full path to the remote directory to list
:type path: str
"""
| python | {
"resource": ""
} |
q266055 | FTPHook.retrieve_file | test | def retrieve_file(
self,
remote_full_path,
local_full_path_or_buffer,
callback=None):
"""
Transfers the remote file to a local location.
If local_full_path_or_buffer is a string path, the file will be put
at that location; if it is a file-like buffer, the file will
be written to the buffer but not closed.
:param remote_full_path: full path to the remote file
:type remote_full_path: str
:param local_full_path_or_buffer: full path to the local file or a
file-like buffer
:type local_full_path_or_buffer: str or file-like buffer
:param callback: callback which is called each time a block of data
is read. if you do not use a callback, these blocks will be written
to the file or buffer passed in. if you do pass in a callback, note
that writing to a file or buffer will need to be handled inside the
callback.
[default: output_handle.write()]
:type callback: callable
:Example::
hook = FTPHook(ftp_conn_id='my_conn')
remote_path = '/path/to/remote/file'
local_path = '/path/to/local/file'
| python | {
"resource": ""
} |
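Continuing the spirit of the truncated example in the `retrieve_file` docstring above, a sketch of both call styles it describes: writing to a local path and streaming into a file-like buffer. The connection id and paths are the docstring's own placeholders.

```python
# Sketch of the two target types described above.
import io
from airflow.contrib.hooks.ftp_hook import FTPHook

hook = FTPHook(ftp_conn_id='my_conn')
remote_path = '/path/to/remote/file'
local_path = '/path/to/local/file'

hook.retrieve_file(remote_path, local_path)   # write to a local file

buffer = io.BytesIO()                         # or keep the content in memory
hook.retrieve_file(remote_path, buffer)
print(len(buffer.getvalue()), "bytes downloaded")
```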
q266056 | FTPHook.store_file | test | def store_file(self, remote_full_path, local_full_path_or_buffer):
"""
Transfers a local file to the remote location.
If local_full_path_or_buffer is a string path, the file will be read
from that location; if it is a file-like buffer, the file will
be read from the buffer but not closed.
:param remote_full_path: full path to the remote file
:type remote_full_path: str
:param local_full_path_or_buffer: full path to the local file or a
file-like buffer
:type local_full_path_or_buffer: str or file-like buffer
"""
conn = self.get_conn()
is_path = isinstance(local_full_path_or_buffer, | python | {
"resource": ""
} |
q266057 | FTPHook.get_mod_time | test | def get_mod_time(self, path):
"""
Returns a datetime object representing the last time the file was modified
:param path: remote file path
:type path: string
"""
conn = self.get_conn()
ftp_mdtm = conn.sendcmd('MDTM ' + path)
time_val = ftp_mdtm[4:]
# time_val optionally | python | {
"resource": ""
} |
q266058 | DiscordWebhookOperator.execute | test | def execute(self, context):
"""
Call the DiscordWebhookHook to post message
"""
self.hook = DiscordWebhookHook(
self.http_conn_id,
self.webhook_endpoint,
self.message,
| python | {
"resource": ""
} |
q266059 | AzureFileShareHook.get_conn | test | def get_conn(self):
"""Return the FileService object."""
conn = self.get_connection(self.conn_id)
service_options = conn.extra_dejson
| python | {
"resource": ""
} |
q266060 | AzureFileShareHook.check_for_directory | test | def check_for_directory(self, share_name, directory_name, **kwargs):
"""
Check if a directory exists on Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param kwargs: Optional keyword arguments that
| python | {
"resource": ""
} |
q266061 | AzureFileShareHook.check_for_file | test | def check_for_file(self, share_name, directory_name, file_name, **kwargs):
"""
Check if a file exists on Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
| python | {
"resource": ""
} |
q266062 | AzureFileShareHook.list_directories_and_files | test | def list_directories_and_files(self, share_name, directory_name=None, **kwargs):
"""
Return the list of directories and files stored on an Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
| python | {
"resource": ""
} |
q266063 | AzureFileShareHook.create_directory | test | def create_directory(self, share_name, directory_name, **kwargs):
"""
Create a new directory on an Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param kwargs: Optional keyword arguments that
| python | {
"resource": ""
} |
q266064 | AzureFileShareHook.load_file | test | def load_file(self, file_path, share_name, directory_name, file_name, **kwargs):
"""
Upload a file to Azure File Share.
:param file_path: Path to the file to load.
:type file_path: str
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the | python | {
"resource": ""
} |
q266065 | AzureFileShareHook.load_string | test | def load_string(self, string_data, share_name, directory_name, file_name, **kwargs):
"""
Upload a string to Azure File Share.
:param string_data: String to load.
:type string_data: str
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the | python | {
"resource": ""
} |
q266066 | AzureFileShareHook.load_stream | test | def load_stream(self, stream, share_name, directory_name, file_name, count, **kwargs):
"""
Upload a stream to Azure File Share.
:param stream: Opened file/stream to upload as the file content.
:type stream: file-like
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param count: Size of the stream in bytes
:type count: int
| python | {
"resource": ""
} |
q266067 | GoogleCloudStorageHook.get_conn | test | def get_conn(self):
"""
Returns a Google Cloud Storage service object.
"""
if not self._conn:
| python | {
"resource": ""
} |
q266068 | GoogleCloudStorageHook.copy | test | def copy(self, source_bucket, source_object, destination_bucket=None,
destination_object=None):
"""
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination bucket the object is copied to.
Can be omitted; then the same bucket is used.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and \
source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
| python | {
"resource": ""
} |
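A sketch of the two copy patterns the docstring above allows, renaming within one bucket and copying across buckets; the connection id, buckets, and object names are hypothetical.

```python
# Minimal sketch; bucket and object names are hypothetical.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

gcs = GoogleCloudStorageHook(google_cloud_storage_conn_id='google_cloud_default')

# Rename within the same bucket: only the destination object differs.
gcs.copy('my-bucket', 'raw/data.csv', destination_object='archive/data.csv')

# Copy to another bucket, keeping the same object name.
gcs.copy('my-bucket', 'raw/data.csv', destination_bucket='my-backup-bucket')
```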
q266069 | GoogleCloudStorageHook.download | test | def download(self, bucket_name, object_name, filename=None):
"""
Get a file from Google Cloud Storage.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
| python | {
"resource": ""
} |
q266070 | GoogleCloudStorageHook.upload | test | def upload(self, bucket_name, object_name, filename,
mime_type='application/octet-stream', gzip=False):
"""
Uploads a local file to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param object_name: The object name to set when uploading the local file.
:type object_name: str
:param filename: The local file path to the file to be uploaded.
:type filename: str
:param mime_type: The MIME type to set when uploading the file.
:type mime_type: str
:param gzip: Option to compress file for upload
:type gzip: bool
"""
if gzip:
filename_gz = filename + '.gz'
with open(filename, 'rb') as f_in:
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
| python | {
"resource": ""
} |
q266071 | GoogleCloudStorageHook.exists | test | def exists(self, bucket_name, object_name):
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket. | python | {
"resource": ""
} |
q266072 | GoogleCloudStorageHook.is_updated_after | test | def is_updated_after(self, bucket_name, object_name, ts):
"""
Checks if a blob_name is updated in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
client = self.get_conn()
bucket = storage.Bucket(client=client, name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
| python | {
"resource": ""
} |
q266073 | GoogleCloudStorageHook.delete | test | def delete(self, bucket_name, object_name):
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:type bucket_name: str
:param object_name: name of the object to delete
:type object_name: str
"""
| python | {
"resource": ""
} |
q266074 | GoogleCloudStorageHook.list | test | def list(self, bucket_name, versions=None, max_results=None, prefix=None, delimiter=None):
"""
List all objects from the bucket with the given string prefix in name
:param bucket_name: bucket name
:type bucket_name: str
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
:param prefix: prefix string which filters objects whose name begin with
this prefix
:type prefix: str
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
:type delimiter: str
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
ids = []
pageToken = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
| python | {
"resource": ""
} |
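A sketch of listing objects with the prefix and delimiter parameters documented above; the connection id, bucket, and prefix are hypothetical.

```python
# Minimal sketch; names are hypothetical.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

gcs = GoogleCloudStorageHook(google_cloud_storage_conn_id='google_cloud_default')
names = gcs.list(
    bucket_name='my-bucket',
    prefix='exports/2019-01-01/',  # only objects under this prefix
    delimiter='.csv',              # only objects ending in .csv
)
for name in names:
    print(name)
```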
q266075 | GoogleCloudStorageHook.get_size | test | def get_size(self, bucket_name, object_name):
"""
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
:type object_name: str
"""
self.log.info('Checking the file size of object: %s in bucket_name: %s',
object_name,
| python | {
"resource": ""
} |
q266076 | GoogleCloudStorageHook.get_crc32c | test | def get_crc32c(self, bucket_name, object_name):
"""
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the crc32c checksum of '
'object_name: %s in bucket_name: %s', object_name, bucket_name)
client | python | {
"resource": ""
} |
q266077 | GoogleCloudStorageHook.get_md5hash | test | def get_md5hash(self, bucket_name, object_name):
"""
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the MD5 hash of '
'object: %s in bucket: %s', object_name, bucket_name)
client | python | {
"resource": ""
} |
q266078 | GoogleCloudStorageHook.create_bucket | test | def create_bucket(self,
bucket_name,
resource=None,
storage_class='MULTI_REGIONAL',
location='US',
project_id=None,
labels=None
):
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace, so
you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:type bucket_name: str
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:type resource: dict
:param storage_class: This defines how objects in the bucket are stored
| python | {
"resource": ""
} |
q266079 | GoogleCloudStorageHook.compose | test | def compose(self, bucket_name, source_objects, destination_object):
"""
Composes a list of existing objects into a new object in the same storage bucket
Currently it only supports up to 32 objects that can be concatenated
in a single operation
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:type bucket_name: str
:param source_objects: The list of source objects that will be composed
| python | {
"resource": ""
} |
q266080 | secondary_training_status_changed | test | def secondary_training_status_changed(current_job_description, prev_job_description):
"""
Returns true if training job's secondary status message has changed.
:param current_job_description: Current job description, returned from DescribeTrainingJob call.
:type current_job_description: dict
:param prev_job_description: Previous job description, returned from DescribeTrainingJob call.
:type prev_job_description: dict
:return: Whether the secondary status message of a training job changed or not.
"""
current_secondary_status_transitions = current_job_description.get('SecondaryStatusTransitions')
if current_secondary_status_transitions is None or len(current_secondary_status_transitions) == 0:
| python | {
"resource": ""
} |
q266081 | secondary_training_status_message | test | def secondary_training_status_message(job_description, prev_description):
"""
Returns a string containing the start time and the secondary training job status message.
:param job_description: Returned response from DescribeTrainingJob call
:type job_description: dict
:param prev_description: Previous job description from DescribeTrainingJob call
:type prev_description: dict
:return: Job status string to be printed.
"""
if job_description is None or job_description.get('SecondaryStatusTransitions') is None\
or len(job_description.get('SecondaryStatusTransitions')) == 0:
| python | {
"resource": ""
} |
q266082 | SageMakerHook.tar_and_s3_upload | test | def tar_and_s3_upload(self, path, key, bucket):
"""
Tar the local file or directory and upload to s3
:param path: local file or directory
:type path: str
:param key: s3 key
:type key: str
:param bucket: s3 bucket
:type bucket: str
:return: None
"""
with tempfile.TemporaryFile() as temp_file:
if os.path.isdir(path):
files = [os.path.join(path, name) for name in os.listdir(path)]
else:
| python | {
"resource": ""
} |
q266083 | SageMakerHook.configure_s3_resources | test | def configure_s3_resources(self, config):
"""
Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
:type config: dict
:rtype: dict
"""
s3_operations = config.pop('S3Operations', None)
if s3_operations is not None:
create_bucket_ops = s3_operations.get('S3CreateBucket', [])
upload_ops = s3_operations.get('S3Upload', [])
for op in create_bucket_ops:
self.s3_hook.create_bucket(bucket_name=op['Bucket'])
for op in upload_ops:
| python | {
"resource": ""
} |
q266084 | SageMakerHook.check_s3_url | test | def check_s3_url(self, s3url):
"""
Check if an S3 URL exists
:param s3url: S3 url
:type s3url: str
:rtype: bool
"""
bucket, key = S3Hook.parse_s3_url(s3url)
if not self.s3_hook.check_for_bucket(bucket_name=bucket):
raise AirflowException(
"The input S3 Bucket {} does not exist ".format(bucket))
if key and not self.s3_hook.check_for_key(key=key, bucket_name=bucket)\
and not self.s3_hook.check_for_prefix(
prefix=key, bucket_name=bucket, delimiter='/'):
# check if s3 key exists in the case user provides a single file
| python | {
"resource": ""
} |
q266085 | SageMakerHook.get_log_conn | test | def get_log_conn(self):
"""
Establish an AWS connection for retrieving logs during training
:rtype: CloudWatchLogs.Client
"""
| python | {
"resource": ""
} |
q266086 | SageMakerHook.create_training_job | test | def create_training_job(self, config, wait_for_completion=True, print_log=True,
check_interval=30, max_ingestion_time=None):
"""
Create a training job
:param config: the config for training
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to training job creation
"""
self.check_training_config(config)
response = self.get_conn().create_training_job(**config)
if print_log:
self.check_training_status_with_log(config['TrainingJobName'],
self.non_terminal_states,
self.failed_states,
wait_for_completion,
check_interval, max_ingestion_time
| python | {
"resource": ""
} |
q266087 | SageMakerHook.create_tuning_job | test | def create_tuning_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create a tuning job
:param config: the config for tuning
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int | python | {
"resource": ""
} |
q266088 | SageMakerHook.create_transform_job | test | def create_transform_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create a transform job
:param config: the config for transform job
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int | python | {
"resource": ""
} |
q266089 | SageMakerHook.create_endpoint | test | def create_endpoint(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create an endpoint
:param config: the config for endpoint
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int | python | {
"resource": ""
} |
q266090 | SageMakerHook.describe_training_job_with_log | test | def describe_training_job_with_log(self, job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call):
"""
Return the training job info associated with job_name and print CloudWatch logs
"""
log_group = '/aws/sagemaker/TrainingJobs'
if len(stream_names) < instance_count:
# Log streams are created whenever a container starts writing to stdout/err, so this list
# may be dynamic until we have a stream for every instance.
logs_conn = self.get_log_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + '/',
orderBy='LogStreamName',
limit=instance_count
)
stream_names = [s['logStreamName'] for s in streams['logStreams']]
positions.update([(s, Position(timestamp=0, skip=0))
| python | {
"resource": ""
} |
q266091 | SageMakerHook.check_status | test | def check_status(self, job_name, key,
describe_function, check_interval,
max_ingestion_time,
non_terminal_states=None):
"""
Check status of a SageMaker job
:param job_name: name of the job to check status
:type job_name: str
:param key: the key of the response dict
that points to the state
:type key: str
:param describe_function: the function used to retrieve the status
:type describe_function: python callable
:param args: the arguments for the function
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:param non_terminal_states: the set of nonterminal states
| python | {
"resource": ""
} |
q266092 | SageMakerHook.check_training_status_with_log | test | def check_training_status_with_log(self, job_name, non_terminal_states, failed_states,
wait_for_completion, check_interval, max_ingestion_time):
"""
Display the logs for a given training job, optionally tailing them until the
job is complete.
:param job_name: name of the training job to check status and display logs for
:type job_name: str
:param non_terminal_states: the set of non_terminal states
:type non_terminal_states: set
:param failed_states: the set of failed states
:type failed_states: set
:param wait_for_completion: Whether to keep looking for new log entries
until the job completes
:type wait_for_completion: bool
:param check_interval: The interval in seconds between polling for new log entries and job completion
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: None
"""
sec = 0
description = self.describe_training_job(job_name)
self.log.info(secondary_training_status_message(description, None))
instance_count = description['ResourceConfig']['InstanceCount']
status = description['TrainingJobStatus']
stream_names = [] # The list of log streams
positions = {} # The current position in each stream, map of stream name -> position
job_already_completed = status not in non_terminal_states
state = LogState.TAILING if wait_for_completion and not job_already_completed else LogState.COMPLETE
# The loop below implements a state machine that alternates between checking the job status and
| python | {
"resource": ""
} |
q266093 | DataFlowPythonOperator.execute | test | def execute(self, context):
"""Execute the python dataflow job."""
bucket_helper = GoogleCloudBucketHelper(
self.gcp_conn_id, self.delegate_to)
self.py_file = bucket_helper.google_cloud_to_local(self.py_file)
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
| python | {
"resource": ""
} |
q266094 | run_migrations_offline | test | def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
| python | {
"resource": ""
} |
q266095 | run_migrations_online | test | def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = settings.engine
with connectable.connect() as connection:
context.configure(
connection=connection,
| python | {
"resource": ""
} |
q266096 | BigtableHook.delete_instance | test | def delete_instance(self, instance_id, project_id=None):
"""
Deletes the specified Cloud Bigtable instance.
Raises google.api_core.exceptions.NotFound if the Cloud Bigtable instance does
not exist.
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type instance_id: str
""" | python | {
"resource": ""
} |
q266097 | BigtableHook.create_instance | test | def create_instance(self,
instance_id,
main_cluster_id,
main_cluster_zone,
project_id=None,
replica_cluster_id=None,
replica_cluster_zone=None,
instance_display_name=None,
instance_type=enums.Instance.Type.TYPE_UNSPECIFIED,
instance_labels=None,
cluster_nodes=None,
cluster_storage_type=enums.StorageType.STORAGE_TYPE_UNSPECIFIED,
timeout=None):
"""
Creates new instance.
:type instance_id: str
:param instance_id: The ID for the new instance.
:type main_cluster_id: str
:param main_cluster_id: The ID for main cluster for the new instance.
:type main_cluster_zone: str
:param main_cluster_zone: The zone for main cluster.
See https://cloud.google.com/bigtable/docs/locations for more details.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type replica_cluster_id: str
:param replica_cluster_id: (optional) The ID for replica cluster for the new
| python | {
"resource": ""
} |
q266098 | BigtableHook.create_table | test | def create_table(instance,
table_id,
initial_split_keys=None,
column_families=None):
"""
Creates the specified Cloud Bigtable table.
Raises ``google.api_core.exceptions.AlreadyExists`` if the table exists.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the table.
:type table_id: str
:param table_id: The ID of the table to create in Cloud Bigtable.
:type initial_split_keys: list
:param initial_split_keys: (Optional) A list of row keys in bytes to use to
initially split the table.
:type column_families: dict
| python | {
"resource": ""
} |
q266099 | BigtableHook.delete_table | test | def delete_table(self, instance_id, table_id, project_id=None):
"""
Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
| python | {
"resource": ""
} |