| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q266200 | GCPTextToSpeechHook.synthesize_speech | test | def synthesize_speech(self, input_data, voice, audio_config, retry=None, timeout=None):
"""
Synthesizes text input
:param input_data: text input to be synthesized. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput
:type input_data: dict or google.cloud.texttospeech_v1.types.SynthesisInput
:param voice: configuration of voice to be used in synthesis. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams
:type voice: dict or google.cloud.texttospeech_v1.types.VoiceSelectionParams
:param audio_config: configuration of the synthesized audio. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig
:type audio_config: dict or google.cloud.texttospeech_v1.types.AudioConfig
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:return: SynthesizeSpeechResponse. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesizeSpeechResponse
:rtype: object
"""
client = self.get_conn()
self.log.info("Synthesizing input: %s", input_data)
return client.synthesize_speech(
input_=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeout
) | python | {
"resource": ""
} |
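Below is a minimal usage sketch for the hook method above. The import path and the "google_cloud_default" connection id are assumptions (not taken from this row), and the dict payloads follow the Cloud Text-to-Speech types linked in the docstring; treat this as a sketch requiring a configured Airflow + GCP environment, not a verified example.

# Hedged usage sketch: import path and connection id are assumptions.
from airflow.contrib.hooks.gcp_text_to_speech_hook import GCPTextToSpeechHook

hook = GCPTextToSpeechHook(gcp_conn_id="google_cloud_default")
response = hook.synthesize_speech(
    input_data={"text": "Hello from Airflow"},                  # SynthesisInput as a dict
    voice={"language_code": "en-US", "ssml_gender": "FEMALE"},  # VoiceSelectionParams as a dict
    audio_config={"audio_encoding": "MP3"},                     # AudioConfig as a dict
)
with open("output.mp3", "wb") as out:
    out.write(response.audio_content)  # audio_content per the linked SynthesizeSpeechResponse type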
q266201 | S3TaskHandler.close | test | def close(self):
"""
Close the handler and upload the local log file to remote storage (S3).
"""
# When the application exits, the logging system shuts down all handlers
# by calling their close() method. Here we check if the handler is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
with open(local_loc, 'r') as logfile:
log = logfile.read()
self.s3_write(log, remote_loc)
# Mark closed so we don't double write if close is called twice
self.closed = True | python | {
"resource": ""
} |
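The close() above relies on an "already closed" guard so the upload happens exactly once even when logging.shutdown() closes handlers again. A self-contained sketch of that idiom follows; the handler class and the upload callable are illustrative stand-ins, not Airflow's S3TaskHandler.

import logging
import os

class UploadOnCloseHandler(logging.FileHandler):
    """Illustrative handler: upload the local log file exactly once on close."""

    def __init__(self, filename, upload):
        super().__init__(filename)
        self.upload = upload        # callable(text) -> None, e.g. a push to remote storage
        self.upload_done = False

    def close(self):
        if self.upload_done:        # close() may be called again by logging.shutdown()
            return
        super().close()
        if os.path.exists(self.baseFilename):
            with open(self.baseFilename, 'r') as logfile:
                self.upload(logfile.read())
        self.upload_done = True

handler = UploadOnCloseHandler("/tmp/task.log", upload=lambda text: print(len(text)))
logging.getLogger("demo").addHandler(handler)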
q266202 | WorkerConfiguration._get_init_containers | test | def _get_init_containers(self):
"""When using git to retrieve the DAGs, use the GitSync Init Container"""
# If we're using volume claims to mount the dags, no init container is needed
if self.kube_config.dags_volume_claim or \
self.kube_config.dags_volume_host or self.kube_config.dags_in_image:
return []
# Otherwise, define a git-sync init container
init_environment = [{
'name': 'GIT_SYNC_REPO',
'value': self.kube_config.git_repo
}, {
'name': 'GIT_SYNC_BRANCH',
'value': self.kube_config.git_branch
}, {
'name': 'GIT_SYNC_ROOT',
'value': self.kube_config.git_sync_root
}, {
'name': 'GIT_SYNC_DEST',
'value': self.kube_config.git_sync_dest
}, {
'name': 'GIT_SYNC_DEPTH',
'value': '1'
}, {
'name': 'GIT_SYNC_ONE_TIME',
'value': 'true'
}]
if self.kube_config.git_user:
init_environment.append({
'name': 'GIT_SYNC_USERNAME',
'value': self.kube_config.git_user
})
if self.kube_config.git_password:
init_environment.append({
'name': 'GIT_SYNC_PASSWORD',
'value': self.kube_config.git_password
})
volume_mounts = [{
'mountPath': self.kube_config.git_sync_root,
'name': self.dags_volume_name,
'readOnly': False
}]
if self.kube_config.git_ssh_key_secret_name:
volume_mounts.append({
'name': self.git_sync_ssh_secret_volume_name,
'mountPath': '/etc/git-secret/ssh',
'subPath': 'ssh'
})
init_environment.extend([
{
'name': 'GIT_SSH_KEY_FILE',
'value': '/etc/git-secret/ssh'
},
{
'name': 'GIT_SYNC_SSH',
'value': 'true'
}])
if self.kube_config.git_ssh_known_hosts_configmap_name:
volume_mounts.append({
'name': self.git_sync_ssh_known_hosts_volume_name,
'mountPath': '/etc/git-secret/known_hosts',
'subPath': 'known_hosts'
})
init_environment.extend([
{
'name': 'GIT_KNOWN_HOSTS',
'value': 'true'
},
{
'name': 'GIT_SSH_KNOWN_HOSTS_FILE',
'value': '/etc/git-secret/known_hosts'
}
])
else:
init_environment.append({
'name': 'GIT_KNOWN_HOSTS',
'value': 'false'
})
return [{
'name': self.kube_config.git_sync_init_container_name,
'image': self.kube_config.git_sync_container,
'securityContext': {'runAsUser': 65533}, # git-sync user
'env': init_environment,
'volumeMounts': volume_mounts
}] | python | {
"resource": ""
} |
q266203 | WorkerConfiguration._get_environment | test | def _get_environment(self):
"""Defines any necessary environment variables for the pod executor"""
env = {}
for env_var_name, env_var_val in six.iteritems(self.kube_config.kube_env_vars):
env[env_var_name] = env_var_val
env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor"
if self.kube_config.airflow_configmap:
env['AIRFLOW_HOME'] = self.worker_airflow_home
env['AIRFLOW__CORE__DAGS_FOLDER'] = self.worker_airflow_dags
if (not self.kube_config.airflow_configmap and
'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in self.kube_config.kube_secrets):
env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get("core", "SQL_ALCHEMY_CONN")
if self.kube_config.git_dags_folder_mount_point:
# /root/airflow/dags/repo/dags
dag_volume_mount_path = os.path.join(
self.kube_config.git_dags_folder_mount_point,
self.kube_config.git_sync_dest, # repo
self.kube_config.git_subpath # dags
)
env['AIRFLOW__CORE__DAGS_FOLDER'] = dag_volume_mount_path
return env | python | {
"resource": ""
} |
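The inline comment above already names the intended result (/root/airflow/dags/repo/dags); the standalone sketch below simply replays that os.path.join with illustrative configuration values.

import os

# illustrative stand-ins for kube_config.git_dags_folder_mount_point,
# git_sync_dest and git_subpath
git_dags_folder_mount_point = "/root/airflow/dags"
git_sync_dest = "repo"
git_subpath = "dags"

env = {"AIRFLOW__CORE__EXECUTOR": "LocalExecutor"}
env["AIRFLOW__CORE__DAGS_FOLDER"] = os.path.join(
    git_dags_folder_mount_point, git_sync_dest, git_subpath)
print(env["AIRFLOW__CORE__DAGS_FOLDER"])  # /root/airflow/dags/repo/dags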
q266204 | WorkerConfiguration._get_secrets | test | def _get_secrets(self):
"""Defines any necessary secrets for the pod executor"""
worker_secrets = []
for env_var_name, obj_key_pair in six.iteritems(self.kube_config.kube_secrets):
k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=')
worker_secrets.append(
Secret('env', env_var_name, k8s_secret_obj, k8s_secret_key)
)
if self.kube_config.env_from_secret_ref:
for secret_ref in self.kube_config.env_from_secret_ref.split(','):
worker_secrets.append(
Secret('env', None, secret_ref)
)
return worker_secrets | python | {
"resource": ""
} |
q266205 | WorkerConfiguration._get_security_context | test | def _get_security_context(self):
"""Defines the security context"""
security_context = {}
if self.kube_config.worker_run_as_user:
security_context['runAsUser'] = self.kube_config.worker_run_as_user
if self.kube_config.worker_fs_group:
security_context['fsGroup'] = self.kube_config.worker_fs_group
# set fs_group to 65533 if not explicitly specified and using git ssh keypair auth
if self.kube_config.git_ssh_key_secret_name and security_context.get('fsGroup') is None:
security_context['fsGroup'] = 65533
return security_context | python | {
"resource": ""
} |
q266206 | QuboleHook.get_extra_links | test | def get_extra_links(self, operator, dttm):
"""
Get link to qubole command result page.
:param operator: the operator whose Qubole command result page is linked
:param dttm: the execution date of the task instance
:return: url link to the Qubole command result page
"""
conn = BaseHook.get_connection(operator.kwargs['qubole_conn_id'])
if conn and conn.host:
host = re.sub(r'api$', 'v2/analyze?command_id=', conn.host)
else:
host = 'https://api.qubole.com/v2/analyze?command_id='
ti = TaskInstance(task=operator, execution_date=dttm)
qds_command_id = ti.xcom_pull(task_ids=operator.task_id, key='qbol_cmd_id')
url = host + str(qds_command_id) if qds_command_id else ''
return url | python | {
"resource": ""
} |
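A standalone sketch of the URL construction used above, with an illustrative connection host and command id; no Qubole connection or TaskInstance lookup is involved.

import re

conn_host = "https://us.qubole.com/api"  # illustrative connection host
if conn_host:
    host = re.sub(r'api$', 'v2/analyze?command_id=', conn_host)
else:
    host = 'https://api.qubole.com/v2/analyze?command_id='

qds_command_id = 12345  # would normally come from the qbol_cmd_id XCom
url = host + str(qds_command_id) if qds_command_id else ''
print(url)  # https://us.qubole.com/v2/analyze?command_id=12345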
q266207 | BaseJob.heartbeat | test | def heartbeat(self):
"""
Heartbeats update the job's entry in the database with a timestamp
for latest_heartbeat and allow the job to be killed
externally. This makes it possible to monitor, at the system level, what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
"""
try:
with create_session() as session:
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
if job.state == State.SHUTDOWN:
self.kill()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
if not is_unit_test:
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
seconds_remaining = self.heartrate - \
(timezone.utcnow() - job.latest_heartbeat)\
.total_seconds()
sleep_for = max(0, seconds_remaining)
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = timezone.utcnow()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError as e:
self.log.error("Scheduler heartbeat got an exception: %s", str(e)) | python | {
"resource": ""
} |
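The "steady heart rate" behaviour described in the docstring reduces to the sleep calculation below; the timestamps are illustrative stand-ins for the DB-backed job record.

from datetime import datetime, timedelta
from time import sleep

heartrate = 60                                                # target seconds between heartbeats
latest_heartbeat = datetime.utcnow() - timedelta(seconds=10)  # pretend the last beat was 10s ago

seconds_remaining = heartrate - (datetime.utcnow() - latest_heartbeat).total_seconds()
sleep_for = max(0, seconds_remaining)
print(sleep_for)  # ~50: sleep the remainder of the interval; 0 if it has already elapsed
sleep(sleep_for)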
q266208 | DagFileProcessor._launch_process | test | def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Launch a process to process the given file.
:param result_queue: the queue to use for passing back the result
:type result_queue: multiprocessing.Queue
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
def helper():
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_queue.put(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
p = multiprocessing.Process(target=helper,
args=(),
name="{}-Process".format(thread_name))
p.start()
return p | python | {
"resource": ""
} |
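A self-contained sketch of the launch pattern above: the work runs in a child process and its result comes back through a multiprocessing.Queue. The worker function is a stand-in for SchedulerJob.process_file, and the logging/ORM setup is omitted.

import multiprocessing

def helper(result_queue, file_path):
    # stand-in for the real parsing work done by SchedulerJob.process_file
    result_queue.put("processed {}".format(file_path))

def launch(result_queue, file_path, thread_name):
    p = multiprocessing.Process(target=helper,
                                args=(result_queue, file_path),
                                name="{}-Process".format(thread_name))
    p.start()
    return p

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    proc = launch(queue, "/dags/example_dag.py", "DagFileProcessor0")
    print(queue.get())  # "processed /dags/example_dag.py"
    proc.join()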
q266209 | DagFileProcessor.start | test | def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies)
self._start_time = timezone.utcnow() | python | {
"resource": ""
} |
q266210 | DagFileProcessor.done | test | def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
# In case result queue is corrupted.
if self._result_queue and not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
# Potential error case when process dies
if self._result_queue and not self._process.is_alive():
self._done = True
# Get the object from the queue or else join() can hang.
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
return False | python | {
"resource": ""
} |
q266211 | SchedulerJob._exit_gracefully | test | def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK) | python | {
"resource": ""
} |
q266212 | SchedulerJob.update_import_errors | test | def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(errors.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit() | python | {
"resource": ""
} |
q266213 | SchedulerJob._process_task_instances | test | def _process_task_instances(self, dag, queue, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_runs.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
# this needs a fresh session; sometimes TIs get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY,
State.UP_FOR_RESCHEDULE))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.log.debug('Queuing task: %s', ti)
queue.append(ti.key) | python | {
"resource": ""
} |
q266214 | SchedulerJob._change_state_for_tis_without_dagrun | test | def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of a DagRun is
changed manually.
:param old_states: examine TaskInstances in these states
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
) | python | {
"resource": ""
} |
q266215 | SchedulerJob.__get_concurrency_maps | test | def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances in the given states, and
a map from (dag_id, task_id) to # of task instances in the given states
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map | python | {
"resource": ""
} |
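The aggregation step above is a defaultdict fold over (task_id, dag_id, count) rows; the sketch below replays it with made-up rows in place of the SQLAlchemy query result.

from collections import defaultdict

rows = [("extract", "etl_dag", 3), ("load", "etl_dag", 1), ("train", "ml_dag", 2)]

dag_map = defaultdict(int)
task_map = defaultdict(int)
for task_id, dag_id, count in rows:
    dag_map[dag_id] += count
    task_map[(dag_id, task_id)] = count

print(dict(dag_map))   # {'etl_dag': 4, 'ml_dag': 2}
print(dict(task_map))  # {('etl_dag', 'extract'): 3, ('etl_dag', 'load'): 1, ('ml_dag', 'train'): 2}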
q266216 | SchedulerJob._change_state_for_executable_task_instances | test | def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa: E711
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = (timezone.utcnow()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances | python | {
"resource": ""
} |
q266217 | SchedulerJob._enqueue_task_instances_with_queued_state | test | def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' dags
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue) | python | {
"resource": ""
} |
q266218 | SchedulerJob._execute_task_instances | test | def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query) | python | {
"resource": ""
} |
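helpers.reduce_in_chunks is referenced above but not shown in this dump; the sketch below is a plausible stand-in (an assumption, not necessarily Airflow's implementation) that applies a reducer to fixed-size slices, which is how the executable TIs get batched per query.

def reduce_in_chunks(fn, iterable, initializer, chunk_size):
    """Apply fn(accumulator, chunk) over fixed-size slices of iterable."""
    acc = initializer
    if chunk_size <= 0:
        chunk_size = len(iterable) or 1  # treat 0 as "one single chunk" (assumption)
    for i in range(0, len(iterable), chunk_size):
        acc = fn(acc, iterable[i:i + chunk_size])
    return acc

# e.g. count items while processing them two at a time
total = reduce_in_chunks(lambda acc, items: acc + len(items), list(range(5)), 0, 2)
print(total)  # 5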
q266219 | SchedulerJob._change_state_for_tasks_failed_to_execute | test | def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 while the TI is not
# running, so we subtract 1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs back to the scheduled state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str) | python | {
"resource": ""
} |
q266220 | SchedulerJob._process_executor_events | test | def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says it's {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit() | python | {
"resource": ""
} |
q266221 | SchedulerJob.process_file | test | def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the DAGs found in the file
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return []
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true?)
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We can defer checking the task dependency checks to the worker themselves
# since they can be expensive to run in the scheduler.
dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
# TODO(aoen): It's not great that we have to check all the task instance
# dependencies twice; once to get the task scheduled, and again to actually
# run the task. We should try to come up with a way to only check them once.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags | python | {
"resource": ""
} |
q266222 | BackfillJob._update_counters | test | def _update_counters(self, ti_status):
"""
Updates the counters per state of the tasks that were running. Can re-add
to tasks to run in case required.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus
"""
for key, ti in list(ti_status.running.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.FAILED:
self.log.error("Task instance %s failed", ti)
ti_status.failed.add(key)
ti_status.running.pop(key)
continue
# special case: if the task needs to run again put it back
elif ti.state == State.UP_FOR_RETRY:
self.log.warning("Task instance %s is up for retry", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: if the task needs to be rescheduled put it back
elif ti.state == State.UP_FOR_RESCHEDULE:
self.log.warning("Task instance %s is up for reschedule", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: The state of the task can be set to NONE by the task itself
# when it reaches concurrency limits. It could also happen when the state
# is changed externally, e.g. by clearing tasks from the ui. We need to cover
# for that as otherwise those tasks would fall outside of the scope of
# the backfill suddenly.
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance %s state was set to none externally or "
"reaching concurrency limits. Re-adding task to queue.",
ti
)
ti.set_state(State.SCHEDULED)
ti_status.running.pop(key)
ti_status.to_run[key] = ti | python | {
"resource": ""
} |
q266223 | BackfillJob._manage_executor_state | test | def _manage_executor_state(self, running):
"""
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
)
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if state == State.FAILED or state == State.SUCCESS:
if ti.state == State.RUNNING or ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says it's {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
ti.handle_failure(msg) | python | {
"resource": ""
} |
q266224 | BackfillJob._get_dag_run | test | def _get_dag_run(self, run_date, session=None):
"""
Returns a dag run for the given run date: an existing dag run is reused if
available, otherwise a new dag run is created. If the max_active_runs
limit is reached, this function will return None.
:param run_date: the execution date for the dag run
:type run_date: datetime.datetime
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
:return: a DagRun in state RUNNING or None
"""
run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())
# consider max_active_runs but ignore when running subdags
respect_dag_max_active_limit = (True
if (self.dag.schedule_interval and
not self.dag.is_subdag)
else False)
current_active_dag_count = self.dag.get_num_active_runs(external_trigger=False)
# check if we are scheduling on top of an already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=run_date,
session=session)
if run is not None and len(run) > 0:
run = run[0]
if run.state == State.RUNNING:
respect_dag_max_active_limit = False
else:
run = None
# enforce max_active_runs limit for dag, special cases already
# handled by respect_dag_max_active_limit
if (respect_dag_max_active_limit and
current_active_dag_count >= self.dag.max_active_runs):
return None
run = run or self.dag.create_dagrun(
run_id=run_id,
execution_date=run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
)
# set required transient field
run.dag = self.dag
# explicitly mark as backfill and running
run.state = State.RUNNING
run.run_id = run_id
run.verify_integrity(session=session)
return run | python | {
"resource": ""
} |
q266225 | BackfillJob._task_instances_for_dag_run | test | def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason, if we don't refresh, the reference to the run is lost
dag_run.refresh_from_db()
make_transient(dag_run)
# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
return tasks_to_run | python | {
"resource": ""
} |
q266226 | BackfillJob._execute_for_run_dates | test | def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
"""
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates of the dag runs that were executed.
:param run_dates: Execution dates for dag runs
:type run_dates: list
:param ti_status: internal BackfillJob status structure used to track the progress of task instances
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to use, it must be previously started
:type executor: BaseExecutor
:param pickle_id: numeric id of the pickled dag, None if not pickled
:type pickle_id: int
:param start_date: backfill start date
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
"""
for next_run_date in run_dates:
dag_run = self._get_dag_run(next_run_date, session=session)
tis_map = self._task_instances_for_dag_run(dag_run,
session=session)
if dag_run is None:
continue
ti_status.active_runs.append(dag_run)
ti_status.to_run.update(tis_map or {})
processed_dag_run_dates = self._process_backfill_task_instances(
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
ti_status.executed_dag_run_dates.update(processed_dag_run_dates) | python | {
"resource": ""
} |
q266227 | BackfillJob._set_unfinished_dag_runs_to_failed | test | def _set_unfinished_dag_runs_to_failed(self, dag_runs, session=None):
"""
Go through the dag_runs and update the state based on the task_instance state.
Then set DAG runs that are not finished to failed.
:param dag_runs: DAG runs
:param session: session
:return: None
"""
for dag_run in dag_runs:
dag_run.update_state()
if dag_run.state not in State.finished():
dag_run.set_state(State.FAILED)
session.merge(dag_run) | python | {
"resource": ""
} |
q266228 | BackfillJob._execute | test | def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date,
end_date=self.bf_end_date)
if self.run_backwards:
tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past]
if tasks_that_depend_on_past:
raise AirflowException(
'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
",".join(tasks_that_depend_on_past)))
run_dates = run_dates[::-1]
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
remaining_dates = (
ti_status.total_runs - len(ti_status.executed_dag_run_dates)
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id
)
time.sleep(self.delay_on_limit_secs)
except (KeyboardInterrupt, SystemExit):
self.log.warning("Backfill terminated by user.")
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)
finally:
session.commit()
executor.end()
self.log.info("Backfill done. Exiting.") | python | {
"resource": ""
} |
q266229 | LocalTaskJob.heartbeat_callback | test | def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
same_process = ti.pid == os.getpid()
if ti.state == State.RUNNING:
if not same_hostname:
self.log.warning("The recorded hostname %s "
"does not match this instance's hostname "
"%s", ti.hostname, fqdn)
raise AirflowException("Hostname of job runner does not match")
elif not same_process:
current_pid = os.getpid()
self.log.warning("Recorded pid %s does not match "
"the current pid %s", ti.pid, current_pid)
raise AirflowException("PID of job runner does not match")
elif (
self.task_runner.return_code() is None and
hasattr(self.task_runner, 'process')
):
self.log.warning(
"State of this instance has been externally set to %s. "
"Taking the poison pill.",
ti.state
)
self.task_runner.terminate()
self.terminating = True | python | {
"resource": ""
} |
q266230 | CloudSpannerHook._get_client | test | def _get_client(self, project_id):
"""
Provides a client for interacting with the Cloud Spanner API.
:param project_id: The ID of the GCP project.
:type project_id: str
:return: google.cloud.spanner_v1.client.Client
:rtype: object
"""
if not self._client:
self._client = Client(project=project_id, credentials=self._get_credentials())
return self._client | python | {
"resource": ""
} |
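The hook above memoises its Spanner client; the sketch below shows the same lazy-initialisation pattern with a stand-in client class so it runs without google-cloud-spanner or credentials.

class FakeSpannerClient:
    """Stand-in for google.cloud.spanner_v1.client.Client."""
    def __init__(self, project):
        self.project = project

class HookSketch:
    def __init__(self):
        self._client = None

    def _get_client(self, project_id):
        if not self._client:  # build the client once, then reuse it
            self._client = FakeSpannerClient(project=project_id)
        return self._client

hook = HookSketch()
assert hook._get_client("my-project") is hook._get_client("my-project")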
q266231 | CloudSpannerHook.get_instance | test | def get_instance(self, instance_id, project_id=None):
"""
Gets information about a particular instance.
:param project_id: Optional, The ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:return: google.cloud.spanner_v1.instance.Instance
:rtype: object
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
return None
return instance | python | {
"resource": ""
} |
q266232 | CloudSpannerHook._apply_to_instance | test | def _apply_to_instance(self, project_id, instance_id, configuration_name, node_count,
display_name, func):
"""
Invokes a method on a given instance by applying a specified Callable.
:param project_id: The ID of the GCP project that owns the Cloud Spanner
database.
:type project_id: str
:param instance_id: The ID of the instance.
:type instance_id: str
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:type configuration_name: str
:param node_count: (Optional) Number of nodes allocated to the instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, will fall back to the instance ID.
:type display_name: str
:param func: Method of the instance to be called.
:type func: Callable
"""
# noinspection PyUnresolvedReferences
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id, configuration_name=configuration_name,
node_count=node_count, display_name=display_name)
try:
operation = func(instance) # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result) | python | {
"resource": ""
} |
q266233 | CloudSpannerHook.create_instance | test | def create_instance(self, instance_id, configuration_name, node_count,
display_name, project_id=None):
"""
Creates a new Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
self._apply_to_instance(project_id, instance_id, configuration_name,
node_count, display_name, lambda x: x.create()) | python | {
"resource": ""
} |
q266234 | CloudSpannerHook.update_instance | test | def update_instance(self, instance_id, configuration_name, node_count,
display_name, project_id=None):
"""
Updates an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
return self._apply_to_instance(project_id, instance_id, configuration_name,
node_count, display_name, lambda x: x.update()) | python | {
"resource": ""
} |
q266235 | CloudSpannerHook.delete_instance | test | def delete_instance(self, instance_id, project_id=None):
"""
Deletes an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id)
try:
instance.delete()
return
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e | python | {
"resource": ""
} |
q266236 | CloudSpannerHook.get_database | test | def get_database(self, instance_id, database_id, project_id=None):
"""
Retrieves a database in Cloud Spanner. If the database does not exist
in the specified instance, it returns None.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: Database object or None if database does not exist
:rtype: google.cloud.spanner_v1.database.Database or None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
if not database.exists():
return None
else:
return database | python | {
"resource": ""
} |
q266237 | CloudSpannerHook.create_database | test | def create_database(self, instance_id, database_id, ddl_statements, project_id=None):
"""
Creates a new database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database to create in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id,
ddl_statements=ddl_statements)
try:
operation = database.create() # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
return | python | {
"resource": ""
} |
q266238 | CloudSpannerHook.update_database | test | def update_database(self, instance_id, database_id, ddl_statements,
project_id=None,
operation_id=None):
"""
Updates DDL of a database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing the DDL statements to apply to the database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param operation_id: (Optional) The unique per database operation ID that can be
specified to implement idempotency check.
:type operation_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
try:
operation = database.update_ddl(
ddl_statements=ddl_statements, operation_id=operation_id)
if operation:
result = operation.result()
self.log.info(result)
return
except AlreadyExists as e:
if e.code == 409 and operation_id in e.message:
self.log.info("Replayed update_ddl message - the operation id %s "
"was already done before.", operation_id)
return
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e | python | {
"resource": ""
} |
q266239 | CloudSpannerHook.delete_database | test | def delete_database(self, instance_id, database_id, project_id=None):
"""
Drops a database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).\
instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
if not database.exists():
self.log.info("The database {} is already deleted from instance {}. "
"Exiting.".format(database_id, instance_id))
return
try:
operation = database.drop() # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
return | python | {
"resource": ""
} |
q266240 | ImapAttachmentSensor.poke | test | def poke(self, context):
"""
Pokes for a mail attachment on the mail server.
:param context: The context that is being provided when poking.
:type context: dict
:return: True if attachment with the given name is present and False if not.
:rtype: bool
"""
self.log.info('Poking for %s', self.attachment_name)
with ImapHook(imap_conn_id=self.conn_id) as imap_hook:
return imap_hook.has_mail_attachment(
name=self.attachment_name,
mail_folder=self.mail_folder,
check_regex=self.check_regex
) | python | {
"resource": ""
} |
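poke() above uses the hook as a context manager so the IMAP connection is opened and closed around a single check; the sketch below shows that shape with a fake hook class standing in for Airflow's ImapHook.

class FakeImapHook:
    """Stand-in for ImapHook: opens on __enter__, closes on __exit__."""
    def __enter__(self):
        print("connecting to IMAP server")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print("closing IMAP connection")

    def has_mail_attachment(self, name, mail_folder="INBOX", check_regex=False):
        return name == "report.csv"  # pretend only this attachment exists

with FakeImapHook() as imap_hook:
    found = imap_hook.has_mail_attachment(name="report.csv")
print(found)  # True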
q266241 | prepare_additional_parameters | test | def prepare_additional_parameters(additional_properties, language_hints, web_detection_params):
"""
Creates additional_properties parameter based on language_hints, web_detection_params and
additional_properties parameters specified by the user
"""
if language_hints is None and web_detection_params is None:
return additional_properties
if additional_properties is None:
return {}
merged_additional_parameters = deepcopy(additional_properties)
if 'image_context' not in merged_additional_parameters:
merged_additional_parameters['image_context'] = {}
merged_additional_parameters['image_context']['language_hints'] = merged_additional_parameters[
'image_context'
].get('language_hints', language_hints)
merged_additional_parameters['image_context']['web_detection_params'] = merged_additional_parameters[
'image_context'
].get('web_detection_params', web_detection_params)
return merged_additional_parameters | python | {
"resource": ""
} |
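A standalone demonstration of the merge behaviour above, re-implemented with setdefault (an equivalent, simplified form): values already present in additional_properties win over the separately supplied hints. All input values are illustrative.

from copy import deepcopy

def merge(additional_properties, language_hints, web_detection_params):
    if language_hints is None and web_detection_params is None:
        return additional_properties
    if additional_properties is None:
        return {}
    merged = deepcopy(additional_properties)
    image_context = merged.setdefault('image_context', {})
    image_context.setdefault('language_hints', language_hints)
    image_context.setdefault('web_detection_params', web_detection_params)
    return merged

print(merge({'image_context': {'language_hints': ['en']}}, ['fr'], None))
# {'image_context': {'language_hints': ['en'], 'web_detection_params': None}}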
q266242 | CassandraHook.get_conn | test | def get_conn(self):
"""
Returns a cassandra Session object
"""
if self.session and not self.session.is_shutdown:
return self.session
self.session = self.cluster.connect(self.keyspace)
return self.session | python | {
"resource": ""
} |
q266243 | CassandraHook.table_exists | test | def table_exists(self, table):
"""
Checks if a table exists in Cassandra
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: str
"""
keyspace = self.keyspace
if '.' in table:
keyspace, table = table.split('.', 1)
cluster_metadata = self.get_conn().cluster.metadata
return (keyspace in cluster_metadata.keyspaces and
table in cluster_metadata.keyspaces[keyspace].tables) | python | {
"resource": ""
} |
q266244 | CassandraHook.record_exists | test | def record_exists(self, table, keys):
"""
Checks if a record exists in Cassandra
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: str
:param keys: The keys and their values to check the existence.
:type keys: dict
"""
keyspace = self.keyspace
if '.' in table:
keyspace, table = table.split('.', 1)
ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys())
cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format(
keyspace=keyspace, table=table, keys=ks)
try:
rs = self.get_conn().execute(cql, keys)
return rs.one() is not None
except Exception:
return False | python | {
"resource": ""
} |
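record_exists builds a parameterised CQL statement with named placeholders that the driver binds from the keys dict; the sketch below just shows the string that gets assembled (keyspace, table and keys are illustrative, nothing is executed).

keys = {"user_id": 42, "day": "2019-01-01"}
ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys())
cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format(
    keyspace="analytics", table="events", keys=ks)
print(cql)
# SELECT * FROM analytics.events WHERE user_id=%(user_id)s AND day=%(day)s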
q266245 | SparkSubmitHook._build_track_driver_status_command | test | def _build_track_driver_status_command(self):
"""
Construct the command to poll the driver status.
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
# The URL of the spark master
connection_cmd += ["--master", self._connection['master']]
# The driver id so we can poll for its status
if self._driver_id:
connection_cmd += ["--status", self._driver_id]
else:
raise AirflowException(
"Invalid status: attempted to poll driver " +
"status but no driver id is known. Giving up.")
self.log.debug("Poll driver status cmd: %s", connection_cmd)
return connection_cmd | python | {
"resource": ""
} |
q266246 | SparkSubmitHook.submit | test | def submit(self, application="", **kwargs):
"""
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_spark_submit_command(application)
if hasattr(self, '_env'):
env = os.environ.copy()
env.update(self._env)
kwargs["env"] = env
self._submit_sp = subprocess.Popen(spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True,
**kwargs)
self._process_spark_submit_log(iter(self._submit_sp.stdout.readline, ''))
returncode = self._submit_sp.wait()
# Check spark-submit return code. In Kubernetes mode, also check the value
# of exit code in the log, as it may differ.
if returncode or (self._is_kubernetes and self._spark_exit_code != 0):
raise AirflowException(
"Cannot execute: {}. Error code is: {}.".format(
spark_submit_cmd, returncode
)
)
self.log.debug("Should track driver: {}".format(self._should_track_driver_status))
# We want the Airflow job to wait until the Spark driver is finished
if self._should_track_driver_status:
if self._driver_id is None:
raise AirflowException(
"No driver id is known: something went wrong when executing " +
"the spark submit command"
)
# We start with the SUBMITTED status as initial status
self._driver_status = "SUBMITTED"
# Start tracking the driver status (blocking function)
self._start_driver_status_tracking()
if self._driver_status != "FINISHED":
raise AirflowException(
"ERROR : Driver {} badly exited with status {}"
.format(self._driver_id, self._driver_status)
) | python | {
"resource": ""
} |
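A sketch of driving SparkSubmitHook.submit() directly, assuming a 'spark_default' connection pointing at your master and spark-submit available on the worker; the application path and job name are placeholders.

from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook

hook = SparkSubmitHook(conn_id='spark_default', name='example-spark-job')
# Blocks until spark-submit returns; raises AirflowException on a non-zero
# exit code or a badly exited tracked driver.
hook.submit(application='/path/to/app.py')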
q266247 | SparkSubmitHook._process_spark_submit_log | test | def _process_spark_submit_log(self, itr):
"""
Processes the log files and extracts useful information out of it.
If the deploy-mode is 'client', log the output of the submit command as those
are the output logs of the Spark worker directly.
Remark: If the driver needs to be tracked for its status, the log-level of the
spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
match = re.search('(application[0-9_]+)', line)
if match:
self._yarn_application_id = match.groups()[0]
self.log.info("Identified spark driver id: %s",
self._yarn_application_id)
# If we run Kubernetes cluster mode, we want to extract the driver pod id
# from the logs so we can kill the application when we stop it unexpectedly
elif self._is_kubernetes:
match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line)
if match:
self._kubernetes_driver_pod = match.groups()[0]
self.log.info("Identified spark driver pod: %s",
self._kubernetes_driver_pod)
# Store the Spark Exit code
match_exit_code = re.search(r'\s*Exit code: (\d+)', line)
if match_exit_code:
self._spark_exit_code = int(match_exit_code.groups()[0])
# if we run in standalone cluster mode and we want to track the driver status
# we need to extract the driver id from the logs. This allows us to poll for
# the status using the driver id. Also, we can kill the driver when needed.
elif self._should_track_driver_status and not self._driver_id:
match_driver_id = re.search(r'(driver-[0-9\-]+)', line)
if match_driver_id:
self._driver_id = match_driver_id.groups()[0]
self.log.info("identified spark driver id: {}"
.format(self._driver_id))
else:
self.log.info(line)
self.log.debug("spark submit log: {}".format(line)) | python | {
"resource": ""
} |
q266248 | SparkSubmitHook._process_spark_status_log | test | def _process_spark_status_log(self, itr):
"""
parses the logs of the spark driver status query process
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# Check if the log line is about the driver status and extract the status.
if "driverState" in line:
self._driver_status = line.split(' : ')[1] \
.replace(',', '').replace('\"', '').strip()
self.log.debug("spark driver status log: {}".format(line)) | python | {
"resource": ""
} |
q266249 | get_task_runner | test | def get_task_runner(local_task_job):
"""
Get the task runner that can be used to run the given job.
:param local_task_job: The LocalTaskJob associated with the TaskInstance
that needs to be executed.
:type local_task_job: airflow.jobs.LocalTaskJob
:return: The task runner to use to run the task.
:rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
"""
if _TASK_RUNNER == "StandardTaskRunner":
return StandardTaskRunner(local_task_job)
elif _TASK_RUNNER == "CgroupTaskRunner":
from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner
return CgroupTaskRunner(local_task_job)
else:
raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER)) | python | {
"resource": ""
} |
q266250 | AWSBatchOperator._wait_for_task_ended | test | def _wait_for_task_ended(self):
"""
Try to use a waiter from the below pull request
* https://github.com/boto/botocore/pull/1307
        If the waiter is not available apply an exponential backoff
* docs.aws.amazon.com/general/latest/gr/api-retries.html
"""
try:
waiter = self.client.get_waiter('job_execution_complete')
waiter.config.max_attempts = sys.maxsize # timeout is managed by airflow
waiter.wait(jobs=[self.jobId])
except ValueError:
# If waiter not available use expo
retry = True
retries = 0
while retries < self.max_retries and retry:
self.log.info('AWS Batch retry in the next %s seconds', retries)
response = self.client.describe_jobs(
jobs=[self.jobId]
)
if response['jobs'][-1]['status'] in ['SUCCEEDED', 'FAILED']:
retry = False
sleep(1 + pow(retries * 0.1, 2))
retries += 1 | python | {
"resource": ""
} |
q266251 | MySqlToGoogleCloudStorageOperator._query_mysql | test | def _query_mysql(self):
"""
Queries mysql and returns a cursor to the results.
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor | python | {
"resource": ""
} |
q266252 | MySqlToGoogleCloudStorageOperator._configure_csv_file | test | def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer | python | {
"resource": ""
} |
q266253 | MySqlToGoogleCloudStorageOperator._write_local_schema_file | test | def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema in .json format for the
results to a local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema_str = None
schema_file_mime_type = 'application/json'
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
if self.schema is not None and isinstance(self.schema, string_types):
schema_str = self.schema.encode('utf-8')
elif self.schema is not None and isinstance(self.schema, list):
schema_str = json.dumps(self.schema).encode('utf-8')
else:
schema = []
for field in cursor.description:
# See PEP 249 for details about the description tuple.
field_name = field[0]
field_type = self.type_map(field[1])
# Always allow TIMESTAMP to be nullable. MySQLdb returns None types
# for required fields because some MySQL timestamps can't be
# represented by Python's datetime (e.g. 0000-00-00 00:00:00).
if field[6] or field_type == 'TIMESTAMP':
field_mode = 'NULLABLE'
else:
field_mode = 'REQUIRED'
schema.append({
'name': field_name,
'type': field_type,
'mode': field_mode,
})
schema_str = json.dumps(schema, sort_keys=True).encode('utf-8')
tmp_schema_file_handle.write(schema_str)
self.log.info('Using schema for %s: %s', self.schema_filename, schema_str)
schema_file_to_upload = {
'file_name': self.schema_filename,
'file_handle': tmp_schema_file_handle,
'file_mime_type': schema_file_mime_type
}
return schema_file_to_upload | python | {
"resource": ""
} |
q266254 | MySqlToGoogleCloudStorageOperator._get_col_type_dict | test | def _get_col_type_dict(self):
"""
Return a dict of column name and column type based on self.schema if not None.
"""
schema = []
if isinstance(self.schema, string_types):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
            self.log.warn('Using default schema due to unexpected type. '
                          'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warn('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
'#specifying_a_json_schema_file')
return col_type_dict | python | {
"resource": ""
} |
q266255 | MySqlToGoogleCloudStorageOperator.type_map | test | def type_map(cls, mysql_type):
"""
Helper function that maps from MySQL fields to BigQuery fields. Used
when a schema_filename is set.
"""
d = {
FIELD_TYPE.INT24: 'INTEGER',
FIELD_TYPE.TINY: 'INTEGER',
FIELD_TYPE.BIT: 'INTEGER',
FIELD_TYPE.DATETIME: 'TIMESTAMP',
FIELD_TYPE.DATE: 'TIMESTAMP',
FIELD_TYPE.DECIMAL: 'FLOAT',
FIELD_TYPE.NEWDECIMAL: 'FLOAT',
FIELD_TYPE.DOUBLE: 'FLOAT',
FIELD_TYPE.FLOAT: 'FLOAT',
FIELD_TYPE.LONG: 'INTEGER',
FIELD_TYPE.LONGLONG: 'INTEGER',
FIELD_TYPE.SHORT: 'INTEGER',
FIELD_TYPE.TIMESTAMP: 'TIMESTAMP',
FIELD_TYPE.YEAR: 'INTEGER',
}
return d[mysql_type] if mysql_type in d else 'STRING' | python | {
"resource": ""
} |
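An illustrative DAG snippet wiring the operator above; the bucket, file names and connection ids are assumptions about your environment, and `dag` is assumed to be an existing DAG object.

from airflow.contrib.operators.mysql_to_gcs import MySqlToGoogleCloudStorageOperator

export_users = MySqlToGoogleCloudStorageOperator(
    task_id='export_users',
    sql='SELECT * FROM users',
    bucket='my-bucket',                    # target GCS bucket (placeholder)
    filename='exports/users_{}.json',      # '{}' is replaced per file chunk
    schema_filename='schemas/users.json',  # triggers the schema file upload
    mysql_conn_id='mysql_default',
    google_cloud_storage_conn_id='google_cloud_default',
    dag=dag,
)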
q266256 | SqoopOperator.execute | test | def execute(self, context):
"""
Execute sqoop job
"""
self.hook = SqoopHook(
conn_id=self.conn_id,
verbose=self.verbose,
num_mappers=self.num_mappers,
hcatalog_database=self.hcatalog_database,
hcatalog_table=self.hcatalog_table,
properties=self.properties
)
if self.cmd_type == 'export':
self.hook.export_table(
table=self.table,
export_dir=self.export_dir,
input_null_string=self.input_null_string,
input_null_non_string=self.input_null_non_string,
staging_table=self.staging_table,
clear_staging_table=self.clear_staging_table,
enclosed_by=self.enclosed_by,
escaped_by=self.escaped_by,
input_fields_terminated_by=self.input_fields_terminated_by,
input_lines_terminated_by=self.input_lines_terminated_by,
input_optionally_enclosed_by=self.input_optionally_enclosed_by,
batch=self.batch,
relaxed_isolation=self.relaxed_isolation,
extra_export_options=self.extra_export_options)
elif self.cmd_type == 'import':
# add create hcatalog table to extra import options if option passed
# if new params are added to constructor can pass them in here
# so don't modify sqoop_hook for each param
if self.create_hcatalog_table:
self.extra_import_options['create-hcatalog-table'] = ''
if self.table and self.query:
raise AirflowException(
'Cannot specify query and table together. Need to specify either or.'
)
if self.table:
self.hook.import_table(
table=self.table,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
columns=self.columns,
split_by=self.split_by,
where=self.where,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options)
elif self.query:
self.hook.import_query(
query=self.query,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
split_by=self.split_by,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options)
else:
raise AirflowException(
"Provide query or table parameter to import using Sqoop"
)
else:
raise AirflowException("cmd_type should be 'import' or 'export'") | python | {
"resource": ""
} |
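A hedged example of the import path through SqoopOperator.execute(); the connection id, table and HDFS target directory are placeholders, and `dag` is assumed to exist.

from airflow.contrib.operators.sqoop_operator import SqoopOperator

import_users = SqoopOperator(
    task_id='sqoop_import_users',
    conn_id='sqoop_default',
    cmd_type='import',        # 'import' or 'export', per execute()
    table='users',            # mutually exclusive with query=
    target_dir='/data/users',
    num_mappers=1,
    dag=dag,
)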
q266257 | apply_lineage | test | def apply_lineage(func):
"""
Saves the lineage to XCom and if configured to do so sends it
to the backend.
"""
backend = _get_backend()
@wraps(func)
def wrapper(self, context, *args, **kwargs):
self.log.debug("Backend: %s, Lineage called with inlets: %s, outlets: %s",
backend, self.inlets, self.outlets)
ret_val = func(self, context, *args, **kwargs)
outlets = [x.as_dict() for x in self.outlets]
inlets = [x.as_dict() for x in self.inlets]
if len(self.outlets) > 0:
self.xcom_push(context,
key=PIPELINE_OUTLETS,
value=outlets,
execution_date=context['ti'].execution_date)
if len(self.inlets) > 0:
self.xcom_push(context,
key=PIPELINE_INLETS,
value=inlets,
execution_date=context['ti'].execution_date)
if backend:
backend.send_lineage(operator=self, inlets=self.inlets,
outlets=self.outlets, context=context)
return ret_val
return wrapper | python | {
"resource": ""
} |
q266258 | Connection.extra_dejson | test | def extra_dejson(self):
"""Returns the extra property by deserializing json."""
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except Exception as e:
self.log.exception(e)
self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
return obj | python | {
"resource": ""
} |
q266259 | date_range | test | def date_range(start_date, end_date=None, num=None, delta=None):
"""
    Get a list of dates based on a start date, an end date and a delta.
    The delta can be anything that can be added to `datetime.datetime`,
    or a cron expression as a `str`.
:Example::
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0),
datetime.datetime(2016, 3, 1, 0, 0)]
:param start_date: anchor date to start the series from
:type start_date: datetime.datetime
:param end_date: right boundary for the date range
:type end_date: datetime.datetime
:param num: alternatively to end_date, you can specify the number of
        entries you want in the range. This number can be negative,
output will always be sorted regardless
:type num: int
"""
if not delta:
return []
if end_date and start_date > end_date:
raise Exception("Wait. start_date needs to be before end_date")
if end_date and num:
raise Exception("Wait. Either specify end_date OR num")
if not end_date and not num:
end_date = timezone.utcnow()
delta_iscron = False
tz = start_date.tzinfo
if isinstance(delta, six.string_types):
delta_iscron = True
start_date = timezone.make_naive(start_date, tz)
cron = croniter(delta, start_date)
elif isinstance(delta, timedelta):
delta = abs(delta)
dates = []
if end_date:
if timezone.is_naive(start_date):
end_date = timezone.make_naive(end_date, tz)
while start_date <= end_date:
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
start_date = cron.get_next(datetime)
else:
start_date += delta
else:
for _ in range(abs(num)):
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
if num > 0:
start_date = cron.get_next(datetime)
else:
start_date = cron.get_prev(datetime)
else:
if num > 0:
start_date += delta
else:
start_date -= delta
return sorted(dates) | python | {
"resource": ""
} |
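A quick check of the cron-style and timedelta deltas, mirroring the docstring examples above.

from datetime import datetime, timedelta
from airflow.utils.dates import date_range

# Three midnights, 2016-01-01 through 2016-01-03 (same as the docstring).
print(date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *'))
# Counting backwards with a negative num; output is still sorted ascending.
print(date_range(datetime(2016, 1, 3), num=-2, delta=timedelta(days=1)))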
q266260 | scale_time_units | test | def scale_time_units(time_seconds_arr, unit):
"""
Convert an array of time durations in seconds to the specified time unit.
"""
if unit == 'minutes':
return list(map(lambda x: x * 1.0 / 60, time_seconds_arr))
elif unit == 'hours':
return list(map(lambda x: x * 1.0 / (60 * 60), time_seconds_arr))
elif unit == 'days':
return list(map(lambda x: x * 1.0 / (24 * 60 * 60), time_seconds_arr))
return time_seconds_arr | python | {
"resource": ""
} |
q266261 | days_ago | test | def days_ago(n, hour=0, minute=0, second=0, microsecond=0):
"""
Get a datetime object representing `n` days ago. By default the time is
set to midnight.
"""
today = timezone.utcnow().replace(
hour=hour,
minute=minute,
second=second,
microsecond=microsecond)
return today - timedelta(days=n) | python | {
"resource": ""
} |
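Typical use of days_ago() as a DAG start_date, per the semantics above.

from airflow.utils.dates import days_ago

default_args = {
    'owner': 'airflow',
    'start_date': days_ago(2),  # midnight UTC, two days ago
}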
q266262 | AirflowSecurityManager.init_role | test | def init_role(self, role_name, role_vms, role_perms):
"""
Initialize the role with the permissions and related view-menus.
:param role_name:
:param role_vms:
:param role_perms:
:return:
"""
pvms = self.get_session.query(sqla_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
role = self.find_role(role_name)
if not role:
role = self.add_role(role_name)
if len(role.permissions) == 0:
self.log.info('Initializing permissions for role:%s in the database.', role_name)
role_pvms = set()
for pvm in pvms:
if pvm.view_menu.name in role_vms and pvm.permission.name in role_perms:
role_pvms.add(pvm)
role.permissions = list(role_pvms)
self.get_session.merge(role)
self.get_session.commit()
else:
self.log.debug('Existing permissions for the role:%s '
'within the database will persist.', role_name) | python | {
"resource": ""
} |
q266263 | AirflowSecurityManager.delete_role | test | def delete_role(self, role_name):
"""Delete the given Role
:param role_name: the name of a role in the ab_role table
"""
session = self.get_session
role = session.query(sqla_models.Role)\
.filter(sqla_models.Role.name == role_name)\
.first()
if role:
self.log.info("Deleting role '%s'", role_name)
session.delete(role)
session.commit()
else:
raise AirflowException("Role named '{}' does not exist".format(
role_name)) | python | {
"resource": ""
} |
q266264 | AirflowSecurityManager.get_user_roles | test | def get_user_roles(self, user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
if user.is_anonymous:
public_role = appbuilder.config.get('AUTH_ROLE_PUBLIC')
return [appbuilder.security_manager.find_role(public_role)] \
if public_role else []
return user.roles | python | {
"resource": ""
} |
q266265 | AirflowSecurityManager.get_all_permissions_views | test | def get_all_permissions_views(self):
"""
Returns a set of tuples with the perm name and view menu name
"""
perms_views = set()
for role in self.get_user_roles():
perms_views.update({(perm_view.permission.name, perm_view.view_menu.name)
for perm_view in role.permissions})
return perms_views | python | {
"resource": ""
} |
q266266 | AirflowSecurityManager._has_role | test | def _has_role(self, role_name_or_list):
"""
Whether the user has this role name
"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(
[r.name in role_name_or_list for r in self.get_user_roles()]) | python | {
"resource": ""
} |
q266267 | AirflowSecurityManager._has_perm | test | def _has_perm(self, permission_name, view_menu_name):
"""
Whether the user has this perm
"""
if hasattr(self, 'perms'):
if (permission_name, view_menu_name) in self.perms:
return True
# rebuild the permissions set
self._get_and_cache_perms()
return (permission_name, view_menu_name) in self.perms | python | {
"resource": ""
} |
q266268 | AirflowSecurityManager.clean_perms | test | def clean_perms(self):
"""
FAB leaves faulty permissions that need to be cleaned up
"""
self.log.debug('Cleaning faulty perms')
sesh = self.get_session
pvms = (
sesh.query(sqla_models.PermissionView)
.filter(or_(
sqla_models.PermissionView.permission == None, # NOQA
sqla_models.PermissionView.view_menu == None, # NOQA
))
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count) | python | {
"resource": ""
} |
q266269 | AirflowSecurityManager._merge_perm | test | def _merge_perm(self, permission_name, view_menu_name):
"""
        Add the new permission, view_menu to ab_permission_view_role if it doesn't exist.
It will add the related entry to ab_permission
and ab_view_menu two meta tables as well.
:param permission_name: Name of the permission.
:type permission_name: str
:param view_menu_name: Name of the view-menu
:type view_menu_name: str
:return:
"""
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
pv = None
if permission and view_menu:
pv = self.get_session.query(self.permissionview_model).filter_by(
permission=permission, view_menu=view_menu).first()
if not pv and permission_name and view_menu_name:
self.add_permission_view_menu(permission_name, view_menu_name) | python | {
"resource": ""
} |
q266270 | AirflowSecurityManager.update_admin_perm_view | test | def update_admin_perm_view(self):
"""
Admin should have all the permission-views.
Add the missing ones to the table for admin.
:return: None.
"""
pvms = self.get_session.query(sqla_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(pvms))
self.get_session.commit() | python | {
"resource": ""
} |
q266271 | AirflowSecurityManager._sync_dag_view_permissions | test | def _sync_dag_view_permissions(self, dag_id, access_control):
"""Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
            {'can_dag_read'})
:type access_control: dict
"""
def _get_or_create_dag_permission(perm_name):
dag_perm = self.find_permission_view_menu(perm_name, dag_id)
if not dag_perm:
self.log.info(
"Creating new permission '%s' on view '%s'",
perm_name, dag_id
)
dag_perm = self.add_permission_view_menu(perm_name, dag_id)
return dag_perm
def _revoke_stale_permissions(dag_view):
existing_dag_perms = self.find_permissions_view_menu(dag_view)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role
if role.name != 'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, {})
if perm.permission.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.permission, dag_id, role.name
)
self.del_permission_role(role, perm)
dag_view = self.find_view_menu(dag_id)
if dag_view:
_revoke_stale_permissions(dag_view)
for rolename, perms in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
"The access_control mapping for DAG '{}' includes a role "
"named '{}', but that role does not exist".format(
dag_id,
rolename))
perms = set(perms)
invalid_perms = perms - self.DAG_PERMS
if invalid_perms:
raise AirflowException(
"The access_control map for DAG '{}' includes the following "
"invalid permissions: {}; The set of valid permissions "
"is: {}".format(dag_id,
(perms - self.DAG_PERMS),
self.DAG_PERMS))
for perm_name in perms:
dag_perm = _get_or_create_dag_permission(perm_name)
self.add_permission_role(role, dag_perm) | python | {
"resource": ""
} |
q266272 | AirflowSecurityManager.create_perm_vm_for_all_dag | test | def create_perm_vm_for_all_dag(self):
"""
        Create perm-vm if it does not exist and insert into FAB security model for all-dags.
"""
# create perm for global logical dag
for dag_vm in self.DAG_VMS:
for perm in self.DAG_PERMS:
self._merge_perm(permission_name=perm,
view_menu_name=dag_vm) | python | {
"resource": ""
} |
q266273 | get_fernet | test | def get_fernet():
"""
Deferred load of Fernet key.
This function could fail either because Cryptography is not installed
or because the Fernet key is invalid.
:return: Fernet object
:raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
"""
global _fernet
log = LoggingMixin().log
if _fernet:
return _fernet
try:
from cryptography.fernet import Fernet, MultiFernet, InvalidToken
global InvalidFernetToken
InvalidFernetToken = InvalidToken
except BuiltinImportError:
log.warning(
"cryptography not found - values will not be stored encrypted."
)
_fernet = NullFernet()
return _fernet
try:
fernet_key = configuration.conf.get('core', 'FERNET_KEY')
if not fernet_key:
log.warning(
"empty cryptography key - values will not be stored encrypted."
)
_fernet = NullFernet()
else:
_fernet = MultiFernet([
Fernet(fernet_part.encode('utf-8'))
for fernet_part in fernet_key.split(',')
])
_fernet.is_encrypted = True
except (ValueError, TypeError) as ve:
raise AirflowException("Could not create Fernet object: {}".format(ve))
return _fernet | python | {
"resource": ""
} |
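A sketch of encrypting a secret the way the Airflow models do; the import path is an assumption (it has moved between Airflow versions), and core.fernet_key must be set for real encryption, otherwise the NullFernet pass-through is returned.

from airflow.models.crypto import get_fernet

fernet = get_fernet()
token = fernet.encrypt(b'my-secret-password')
# With a NullFernet (no key configured) encrypt/decrypt are no-ops.
print(fernet.is_encrypted, fernet.decrypt(token))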
q266274 | AwsGlueCatalogPartitionSensor.poke | test | def poke(self, context):
"""
Checks for existence of the partition in the AWS Glue Catalog table
"""
if '.' in self.table_name:
self.database_name, self.table_name = self.table_name.split('.')
self.log.info(
            'Poking for table %s.%s, expression %s', self.database_name, self.table_name, self.expression
)
return self.get_hook().check_for_partition(
self.database_name, self.table_name, self.expression) | python | {
"resource": ""
} |
q266275 | AwsGlueCatalogPartitionSensor.get_hook | test | def get_hook(self):
"""
Gets the AwsGlueCatalogHook
"""
if not hasattr(self, 'hook'):
from airflow.contrib.hooks.aws_glue_catalog_hook import AwsGlueCatalogHook
self.hook = AwsGlueCatalogHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name)
return self.hook | python | {
"resource": ""
} |
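A hedged example of wiring the sensor into a DAG; the database, table and partition expression are placeholders, and `dag` is assumed to exist.

from airflow.contrib.sensors.aws_glue_catalog_partition_sensor import (
    AwsGlueCatalogPartitionSensor,
)

wait_for_partition = AwsGlueCatalogPartitionSensor(
    task_id='wait_for_partition',
    database_name='analytics',
    table_name='events',            # 'analytics.events' would also work
    expression="ds='2019-01-01'",   # partition predicate to poke for
    aws_conn_id='aws_default',
    dag=dag,
)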
q266276 | SQSSensor.poke | test | def poke(self, context):
"""
Check for message on subscribed queue and write to xcom the message with key ``messages``
:param context: the context object
:type context: dict
:return: ``True`` if message is available or ``False``
"""
sqs_hook = SQSHook(aws_conn_id=self.aws_conn_id)
sqs_conn = sqs_hook.get_conn()
self.log.info('SQSSensor checking for message on queue: %s', self.sqs_queue)
messages = sqs_conn.receive_message(QueueUrl=self.sqs_queue,
MaxNumberOfMessages=self.max_messages,
WaitTimeSeconds=self.wait_time_seconds)
        self.log.info("received message %s", str(messages))
if 'Messages' in messages and len(messages['Messages']) > 0:
entries = [{'Id': message['MessageId'], 'ReceiptHandle': message['ReceiptHandle']}
for message in messages['Messages']]
result = sqs_conn.delete_message_batch(QueueUrl=self.sqs_queue,
Entries=entries)
if 'Successful' in result:
context['ti'].xcom_push(key='messages', value=messages)
return True
else:
raise AirflowException(
'Delete SQS Messages failed ' + str(result) + ' for messages ' + str(messages))
return False | python | {
"resource": ""
} |
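A minimal sketch of the sensor above; the queue URL and connection id are placeholders, and `dag` is assumed to exist. On success the raw receive_message response is pushed to XCom under the 'messages' key.

from airflow.contrib.sensors.aws_sqs_sensor import SQSSensor

wait_for_sqs = SQSSensor(
    task_id='wait_for_sqs_message',
    sqs_queue='https://sqs.us-east-1.amazonaws.com/123456789012/my-queue',
    aws_conn_id='aws_default',
    max_messages=5,
    wait_time_seconds=1,
    dag=dag,
)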
q266277 | HDFSHook.get_conn | test | def get_conn(self):
"""
Returns a snakebite HDFSClient object.
"""
# When using HAClient, proxy_user must be the same, so is ok to always
# take the first.
effective_user = self.proxy_user
autoconfig = self.autoconfig
use_sasl = configuration.conf.get('core', 'security') == 'kerberos'
try:
connections = self.get_connections(self.hdfs_conn_id)
if not effective_user:
effective_user = connections[0].login
if not autoconfig:
autoconfig = connections[0].extra_dejson.get('autoconfig',
False)
hdfs_namenode_principal = connections[0].extra_dejson.get(
'hdfs_namenode_principal')
except AirflowException:
if not autoconfig:
raise
if autoconfig:
# will read config info from $HADOOP_HOME conf files
client = AutoConfigClient(effective_user=effective_user,
use_sasl=use_sasl)
elif len(connections) == 1:
client = Client(connections[0].host, connections[0].port,
effective_user=effective_user, use_sasl=use_sasl,
hdfs_namenode_principal=hdfs_namenode_principal)
elif len(connections) > 1:
nn = [Namenode(conn.host, conn.port) for conn in connections]
client = HAClient(nn, effective_user=effective_user,
use_sasl=use_sasl,
hdfs_namenode_principal=hdfs_namenode_principal)
else:
raise HDFSHookException("conn_id doesn't exist in the repository "
"and autoconfig is not specified")
return client | python | {
"resource": ""
} |
q266278 | WebHDFSHook.get_conn | test | def get_conn(self):
"""
Establishes a connection depending on the security mode set via config or environment variable.
:return: a hdfscli InsecureClient or KerberosClient object.
:rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient
"""
connections = self.get_connections(self.webhdfs_conn_id)
for connection in connections:
try:
self.log.debug('Trying namenode %s', connection.host)
client = self._get_client(connection)
client.status('/')
self.log.debug('Using namenode %s for hook', connection.host)
return client
except HdfsError as hdfs_error:
self.log.debug('Read operation on namenode %s failed with error: %s',
connection.host, hdfs_error)
hosts = [connection.host for connection in connections]
error_message = 'Read operations failed on the namenodes below:\n{hosts}'.format(
hosts='\n'.join(hosts))
raise AirflowWebHDFSHookException(error_message) | python | {
"resource": ""
} |
q266279 | WebHDFSHook.check_for_path | test | def check_for_path(self, hdfs_path):
"""
Check for the existence of a path in HDFS by querying FileStatus.
:param hdfs_path: The path to check.
:type hdfs_path: str
:return: True if the path exists and False if not.
:rtype: bool
"""
conn = self.get_conn()
status = conn.status(hdfs_path, strict=False)
return bool(status) | python | {
"resource": ""
} |
q266280 | WebHDFSHook.load_file | test | def load_file(self, source, destination, overwrite=True, parallelism=1, **kwargs):
r"""
Uploads a file to HDFS.
:param source: Local path to file or folder.
If it's a folder, all the files inside of it will be uploaded.
.. note:: This implies that folders empty of files will not be created remotely.
:type source: str
        :param destination: Target HDFS path.
If it already exists and is a directory, files will be uploaded inside.
:type destination: str
:param overwrite: Overwrite any existing file or directory.
:type overwrite: bool
:param parallelism: Number of threads to use for parallelization.
A value of `0` (or negative) uses as many threads as there are files.
:type parallelism: int
:param \**kwargs: Keyword arguments forwarded to :meth:`hdfs.client.Client.upload`.
"""
conn = self.get_conn()
conn.upload(hdfs_path=destination,
local_path=source,
overwrite=overwrite,
n_threads=parallelism,
**kwargs)
self.log.debug("Uploaded file %s to %s", source, destination) | python | {
"resource": ""
} |
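A minimal sketch combining the two WebHDFS methods above, assuming a 'webhdfs_default' connection; the paths are placeholders.

from airflow.hooks.webhdfs_hook import WebHDFSHook

hook = WebHDFSHook(webhdfs_conn_id='webhdfs_default')
if not hook.check_for_path('/data/raw/users.csv'):
    # Uploads the local file; overwrite defaults to True.
    hook.load_file(source='/tmp/users.csv', destination='/data/raw/users.csv')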
q266281 | PinotDbApiHook.get_conn | test | def get_conn(self):
"""
        Establish a connection to pinot broker through pinot dbapi.
"""
conn = self.get_connection(self.pinot_broker_conn_id)
pinot_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/pql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
self.log.info('Get the connection to pinot '
'broker on {host}'.format(host=conn.host))
return pinot_broker_conn | python | {
"resource": ""
} |
q266282 | PinotDbApiHook.get_uri | test | def get_uri(self):
"""
Get the connection uri for pinot broker.
e.g: http://localhost:9000/pql
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
conn_type = 'http' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', 'pql')
return '{conn_type}://{host}/{endpoint}'.format(
conn_type=conn_type, host=host, endpoint=endpoint) | python | {
"resource": ""
} |
q266283 | TransferJobPreprocessor._convert_date_to_dict | test | def _convert_date_to_dict(field_date):
"""
Convert native python ``datetime.date`` object to a format supported by the API
"""
return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year} | python | {
"resource": ""
} |
q266284 | TransferJobPreprocessor._convert_time_to_dict | test | def _convert_time_to_dict(time):
"""
Convert native python ``datetime.time`` object to a format supported by the API
"""
return {HOURS: time.hour, MINUTES: time.minute, SECONDS: time.second} | python | {
"resource": ""
} |
q266285 | RedisHook.get_conn | test | def get_conn(self):
"""
Returns a Redis connection.
"""
conn = self.get_connection(self.redis_conn_id)
self.host = conn.host
self.port = conn.port
self.password = None if str(conn.password).lower() in ['none', 'false', ''] else conn.password
self.db = conn.extra_dejson.get('db', None)
if not self.redis:
self.log.debug(
'Initializing redis object for conn_id "%s" on %s:%s:%s',
self.redis_conn_id, self.host, self.port, self.db
)
self.redis = Redis(
host=self.host,
port=self.port,
password=self.password,
db=self.db)
return self.redis | python | {
"resource": ""
} |
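A short usage sketch, assuming a 'redis_default' connection; the key and value are placeholders.

from airflow.contrib.hooks.redis_hook import RedisHook

redis_client = RedisHook(redis_conn_id='redis_default').get_conn()
redis_client.set('last_successful_run', '2019-01-01')
print(redis_client.get('last_successful_run'))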
q266286 | DbApiHook.get_pandas_df | test | def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters) | python | {
"resource": ""
} |
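DbApiHook is normally used through a concrete subclass; a sketch with MySqlHook, assuming a 'mysql_default' connection and an existing users table.

from airflow.hooks.mysql_hook import MySqlHook

df = MySqlHook(mysql_conn_id='mysql_default').get_pandas_df(
    'SELECT id, name FROM users WHERE created_at >= %(since)s',
    parameters={'since': '2019-01-01'},
)
print(df.head())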
q266287 | DbApiHook.run | test | def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if parameters is not None:
self.log.info("{} with parameters {}".format(s, parameters))
cur.execute(s, parameters)
else:
self.log.info(s)
cur.execute(s)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit() | python | {
"resource": ""
} |
q266288 | DbApiHook.set_autocommit | test | def set_autocommit(self, conn, autocommit):
"""
Sets the autocommit flag on the connection
"""
if not self.supports_autocommit and autocommit:
            self.log.warn(
                ("%s connection doesn't support "
                 "autocommit but autocommit is activated."),
getattr(self, self.conn_name_attr))
conn.autocommit = autocommit | python | {
"resource": ""
} |
q266289 | DbApiHook.insert_rows | test | def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
placeholders = ["%s", ] * len(values)
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded %s into %s rows so far", i, table
)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i) | python | {
"resource": ""
} |
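The same generic insert_rows() through PostgresHook, assuming a 'postgres_default' connection and an existing users table with matching columns.

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='postgres_default')
rows = [(1, 'alice'), (2, 'bob')]
# Commits every 1000 rows by default; replace=True would emit REPLACE INTO,
# which PostgreSQL does not support, so leave it False here.
hook.insert_rows(table='users', rows=rows, target_fields=['id', 'name'])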
q266290 | DbApiHook._serialize_cell | test | def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell) | python | {
"resource": ""
} |
q266291 | Airflow.health | test | def health(self, session=None):
"""
An endpoint helping check the health status of the Airflow instance,
including metadatabase and scheduler.
"""
BJ = jobs.BaseJob
payload = {}
scheduler_health_check_threshold = timedelta(seconds=conf.getint('scheduler',
'scheduler_health_check_threshold'
))
latest_scheduler_heartbeat = None
payload['metadatabase'] = {'status': 'healthy'}
try:
latest_scheduler_heartbeat = session.query(func.max(BJ.latest_heartbeat)).\
filter(BJ.state == 'running', BJ.job_type == 'SchedulerJob').\
scalar()
except Exception:
payload['metadatabase']['status'] = 'unhealthy'
if not latest_scheduler_heartbeat:
scheduler_status = 'unhealthy'
else:
if timezone.utcnow() - latest_scheduler_heartbeat <= scheduler_health_check_threshold:
scheduler_status = 'healthy'
else:
scheduler_status = 'unhealthy'
payload['scheduler'] = {'status': scheduler_status,
'latest_scheduler_heartbeat': str(latest_scheduler_heartbeat)}
return wwwutils.json_response(payload) | python | {
"resource": ""
} |
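A sketch of polling the endpoint above from outside the webserver; the base URL is an assumption about the deployment.

import requests

resp = requests.get('http://localhost:8080/health')
payload = resp.json()
print(payload['metadatabase']['status'], payload['scheduler']['status'])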
q266292 | Airflow.extra_links | test | def extra_links(self):
"""
A restful endpoint that returns external links for a given Operator
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
link_name = request.args.get('link_name')
dttm = airflow.utils.timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
response = jsonify(
{'url': None,
'error': "can't find dag {dag} or task_id {task_id}".format(
dag=dag,
task_id=task_id
)}
)
response.status_code = 404
return response
task = dag.get_task(task_id)
try:
url = task.get_extra_links(dttm, link_name)
except ValueError as err:
response = jsonify({'url': None, 'error': str(err)})
response.status_code = 404
return response
if url:
response = jsonify({'error': None, 'url': url})
response.status_code = 200
return response
else:
response = jsonify(
{'url': None, 'error': 'No URL found for {dest}'.format(dest=link_name)})
response.status_code = 404
return response | python | {
"resource": ""
} |
q266293 | CloudantHook.get_conn | test | def get_conn(self):
"""
Opens a connection to the cloudant service and closes it automatically if used as context manager.
.. note::
In the connection form:
- 'host' equals the 'Account' (optional)
- 'login' equals the 'Username (or API Key)' (required)
- 'password' equals the 'Password' (required)
:return: an authorized cloudant session context manager object.
:rtype: cloudant
"""
conn = self.get_connection(self.cloudant_conn_id)
self._validate_connection(conn)
cloudant_session = cloudant(user=conn.login, passwd=conn.password, account=conn.host)
return cloudant_session | python | {
"resource": ""
} |
q266294 | SlackWebhookOperator.execute | test | def execute(self, context):
"""
Call the SlackWebhookHook to post the provided Slack message
"""
self.hook = SlackWebhookHook(
self.http_conn_id,
self.webhook_token,
self.message,
self.attachments,
self.channel,
self.username,
self.icon_emoji,
self.link_names,
self.proxy
)
self.hook.execute() | python | {
"resource": ""
} |
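An illustrative DAG snippet for the operator above; the connection id and channel are placeholders, and `dag` is assumed to exist. The webhook token can live in the 'slack_webhook' connection instead of being passed explicitly.

from airflow.contrib.operators.slack_webhook_operator import SlackWebhookOperator

notify_slack = SlackWebhookOperator(
    task_id='notify_slack',
    http_conn_id='slack_webhook',
    message='Pipeline finished for {{ ds }}',  # message is a templated field
    channel='#data-alerts',
    username='airflow',
    dag=dag,
)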
q266295 | GoogleCloudBaseHook._get_credentials | test | def _get_credentials(self):
"""
Returns the Credentials object for Google API
"""
key_path = self._get_field('key_path', False)
keyfile_dict = self._get_field('keyfile_dict', False)
scope = self._get_field('scope', None)
if scope:
scopes = [s.strip() for s in scope.split(',')]
else:
scopes = _DEFAULT_SCOPES
if not key_path and not keyfile_dict:
self.log.info('Getting connection using `google.auth.default()` '
'since no key file is defined for hook.')
credentials, _ = google.auth.default(scopes=scopes)
elif key_path:
# Get credentials from a JSON file.
if key_path.endswith('.json'):
self.log.debug('Getting connection using JSON key file %s' % key_path)
credentials = (
google.oauth2.service_account.Credentials.from_service_account_file(
key_path, scopes=scopes)
)
elif key_path.endswith('.p12'):
raise AirflowException('Legacy P12 key file are not supported, '
'use a JSON key file.')
else:
raise AirflowException('Unrecognised extension for key file.')
else:
# Get credentials from JSON data provided in the UI.
try:
keyfile_dict = json.loads(keyfile_dict)
# Depending on how the JSON was formatted, it may contain
# escaped newlines. Convert those to actual newlines.
keyfile_dict['private_key'] = keyfile_dict['private_key'].replace(
'\\n', '\n')
credentials = (
google.oauth2.service_account.Credentials.from_service_account_info(
keyfile_dict, scopes=scopes)
)
except json.decoder.JSONDecodeError:
raise AirflowException('Invalid key JSON.')
return credentials.with_subject(self.delegate_to) \
if self.delegate_to else credentials | python | {
"resource": ""
} |
q266296 | GoogleCloudBaseHook._authorize | test | def _authorize(self):
"""
Returns an authorized HTTP object to be used to build a Google cloud
service hook connection.
"""
credentials = self._get_credentials()
http = httplib2.Http()
authed_http = google_auth_httplib2.AuthorizedHttp(
credentials, http=http)
return authed_http | python | {
"resource": ""
} |
q266297 | GoogleCloudBaseHook.catch_http_exception | test | def catch_http_exception(func):
"""
Function decorator that intercepts HTTP Errors and raises AirflowException
with more informative message.
"""
@functools.wraps(func)
def wrapper_decorator(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except GoogleAPICallError as e:
if isinstance(e, AlreadyExists):
raise e
else:
self.log.error('The request failed:\n%s', str(e))
raise AirflowException(e)
except RetryError as e:
self.log.error('The request failed due to a retryable error and retry attempts failed.')
raise AirflowException(e)
except ValueError as e:
self.log.error('The request failed, the parameters are invalid.')
raise AirflowException(e)
except HttpError as e:
self.log.error('The request failed:\n%s', str(e))
raise AirflowException(e)
return wrapper_decorator | python | {
"resource": ""
} |
q266298 | GoogleCloudBaseHook.fallback_to_default_project_id | test | def fallback_to_default_project_id(func):
"""
Decorator that provides fallback for Google Cloud Platform project id. If
the project is None it will be replaced with the project_id from the
service account the Hook is authenticated with. Project id can be specified
either via project_id kwarg or via first parameter in positional args.
:param func: function to wrap
:return: result of the function call
"""
@functools.wraps(func)
def inner_wrapper(self, *args, **kwargs):
if len(args) > 0:
raise AirflowException(
"You must use keyword arguments in this methods rather than"
" positional")
if 'project_id' in kwargs:
kwargs['project_id'] = self._get_project_id(kwargs['project_id'])
else:
kwargs['project_id'] = self._get_project_id(None)
if not kwargs['project_id']:
raise AirflowException("The project id must be passed either as "
"keyword project_id parameter or as project_id extra "
"in GCP connection definition. Both are not set!")
return func(self, *args, **kwargs)
return inner_wrapper | python | {
"resource": ""
} |
q266299 | State.unfinished | test | def unfinished(cls):
"""
A list of states indicating that a task either has not completed
a run or has not even started.
"""
return [
cls.NONE,
cls.SCHEDULED,
cls.QUEUED,
cls.RUNNING,
cls.SHUTDOWN,
cls.UP_FOR_RETRY,
cls.UP_FOR_RESCHEDULE
] | python | {
"resource": ""
} |