Dataset schema — one record per entry below:

    _id               stringlengths    2–7
    title             stringlengths    1–88
    partition         stringclasses    3 values
    text              stringlengths    31–13.1k
    language          stringclasses    1 value
    meta_information  dict

Every record in this extract has partition "test", language "python", and an
empty meta_information of {"resource": ""}, so each record below is rendered
as a one-line header — _id · title (partition, language) — followed by its
text field (source code, truncated where the original row was cut off).
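
A minimal sketch of iterating records with this schema. The dataset id
"user/airflow-functions" is a placeholder (the real id is not given in this
extract); `datasets.load_dataset` is the standard Hugging Face loader.

    from datasets import load_dataset  # pip install datasets

    # Placeholder dataset id — substitute the actual repository name
    ds = load_dataset("user/airflow-functions", split="test")
    for record in ds:
        # Each record mirrors the schema above
        print(record["_id"], record["title"], record["language"])
        print(record["text"][:80])  # "text" holds the (possibly truncated) source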

q266200 · GCPTextToSpeechHook.synthesize_speech (test, python)

def synthesize_speech(self, input_data, voice, audio_config, retry=None, timeout=None):
    """
    Synthesizes text input.

    :param input_data: text input to be synthesized. See more:
        https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput
    :type input_data: dict or google.cloud.texttospeech_v1.types.SynthesisInput
    :param voice: configuration of voice to be used in synthesis. See more:
        https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams
    :type voice: dict or google.cloud.texttospeech_v1.types.VoiceSelectionParams
    :param audio_config: configuration of the synthesized audio. See more:
        https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig
    :type audio_config: dict or google.cloud.texttospeech_v1.types.AudioConfig
    :return: SynthesizeSpeechResponse. See more:
        https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesizeSpeechResponse
    :rtype: object

q266201 · S3TaskHandler.close (test, python)

def close(self):
    """
    Close and upload local log file to remote storage S3.
    """
    # When application exit, system shuts down all handlers by
    # calling close method. Here we check if logger is already
    # closed to prevent uploading the log to remote storage multiple
    # times when `logging.shutdown` is called.
    if self.closed:
        return

    super().close()

    if not self.upload_on_close:
        return

    local_loc = os.path.join(self.local_base, self.log_relative_path)
    remote_loc = os.path.join(self.remote_base, self.log_relative_path)

q266202 · WorkerConfiguration._get_init_containers (test, python)

def _get_init_containers(self):
    """When using git to retrieve the DAGs, use the GitSync Init Container"""
    # If we're using volume claims to mount the dags, no init container is needed
    if self.kube_config.dags_volume_claim or \
            self.kube_config.dags_volume_host or self.kube_config.dags_in_image:
        return []

    # Otherwise, define a git-sync init container
    init_environment = [{
        'name': 'GIT_SYNC_REPO',
        'value': self.kube_config.git_repo
    }, {
        'name': 'GIT_SYNC_BRANCH',
        'value': self.kube_config.git_branch
    }, {
        'name': 'GIT_SYNC_ROOT',
        'value': self.kube_config.git_sync_root
    }, {
        'name': 'GIT_SYNC_DEST',
        'value': self.kube_config.git_sync_dest
    }, {
        'name': 'GIT_SYNC_DEPTH',
        'value': '1'
    }, {
        'name': 'GIT_SYNC_ONE_TIME',
        'value': 'true'
    }]
    if self.kube_config.git_user:
        init_environment.append({
            'name': 'GIT_SYNC_USERNAME',
            'value': self.kube_config.git_user
        })
    if self.kube_config.git_password:
        init_environment.append({
            'name': 'GIT_SYNC_PASSWORD',
            'value': self.kube_config.git_password
        })

    volume_mounts = [{
        'mountPath': self.kube_config.git_sync_root,
        'name': self.dags_volume_name,
        'readOnly': False
    }]
    if self.kube_config.git_ssh_key_secret_name:
        volume_mounts.append({
            'name': self.git_sync_ssh_secret_volume_name,
            'mountPath': '/etc/git-secret/ssh',
            'subPath': 'ssh'
        })
        init_environment.extend([
            {
                'name': 'GIT_SSH_KEY_FILE',
                'value': '/etc/git-secret/ssh'

q266203 · WorkerConfiguration._get_environment (test, python)

def _get_environment(self):
    """Defines any necessary environment variables for the pod executor"""
    env = {}

    for env_var_name, env_var_val in six.iteritems(self.kube_config.kube_env_vars):
        env[env_var_name] = env_var_val

    env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor"

    if self.kube_config.airflow_configmap:
        env['AIRFLOW_HOME'] = self.worker_airflow_home
        env['AIRFLOW__CORE__DAGS_FOLDER'] = self.worker_airflow_dags
    if (not self.kube_config.airflow_configmap and
            'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in self.kube_config.kube_secrets):
        env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get("core", "SQL_ALCHEMY_CONN")
    if self.kube_config.git_dags_folder_mount_point:
        #

q266204 · WorkerConfiguration._get_secrets (test, python)

def _get_secrets(self):
    """Defines any necessary secrets for the pod executor"""
    worker_secrets = []

    for env_var_name, obj_key_pair in six.iteritems(self.kube_config.kube_secrets):
        k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=')

q266205 · WorkerConfiguration._get_security_context (test, python)

def _get_security_context(self):
    """Defines the security context"""
    security_context = {}

    if self.kube_config.worker_run_as_user:
        security_context['runAsUser'] = self.kube_config.worker_run_as_user

    if self.kube_config.worker_fs_group:
        security_context['fsGroup'] = self.kube_config.worker_fs_group

    # set fs_group to 65533 if not explicitly specified and using git

q266206 · QuboleHook.get_extra_links (test, python)

def get_extra_links(self, operator, dttm):
    """
    Get link to qubole command result page.

    :param operator: operator
    :param dttm: datetime
    :return: url link
    """
    conn = BaseHook.get_connection(operator.kwargs['qubole_conn_id'])

q266207 · BaseJob.heartbeat (test, python)

def heartbeat(self):
    """
    Heartbeats update the job's entry in the database with a timestamp
    for the latest_heartbeat and allow the job to be killed
    externally. This makes it possible to monitor, at the system level,
    what is actually active.

    For instance, an old heartbeat for SchedulerJob would mean something
    is wrong.

    This also allows for any job to be killed externally, regardless
    of who is running it or on which machine it is running.

    Note that if your heartbeat is set to 60 seconds and you call this
    method after 10 seconds of processing since the last heartbeat, it
    will sleep 50 seconds to complete the 60 seconds and keep a steady
    heart rate. If you go over 60 seconds before calling it, it won't
    sleep at all.
    """
    try:
        with

q266208 · DagFileProcessor._launch_process (test, python)

def _launch_process(result_queue, file_path, pickle_dags, dag_id_white_list,
                    thread_name, zombies):
    """
    Launch a process to process the given file.

    :param result_queue: the queue to use for passing back the result
    :type result_queue: multiprocessing.Queue
    :param file_path: the file to process
    :type file_path: unicode
    :param pickle_dags: whether to pickle the DAGs found in the file and
        save them to the DB
    :type pickle_dags: bool
    :param dag_id_white_list: if specified, only examine DAG IDs that are
        in this list
    :type dag_id_white_list: list[unicode]
    :param thread_name: the name to use for the process that is launched
    :type thread_name: unicode
    :param zombies: zombie task instances to kill
    :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
    :return: the process that was launched
    :rtype: multiprocessing.Process
    """
    def helper():
        # This helper runs in the newly created process
        log = logging.getLogger("airflow.processor")

        stdout = StreamLogWriter(log, logging.INFO)
        stderr = StreamLogWriter(log, logging.WARN)

        set_context(log, file_path)

        try:
            # redirect stdout/stderr to log
            sys.stdout = stdout
            sys.stderr = stderr

            # Re-configure the ORM engine as there are issues with multiple processes
            settings.configure_orm()

            # Change the thread name to differentiate log lines. This is
            # really a separate process, but changing the name of the
            # process doesn't work, so changing the thread name instead.
            threading.current_thread().name = thread_name
            start_time = time.time()

            log.info("Started process (PID=%s) to work on

q266209 · DagFileProcessor.start (test, python)

def start(self):
    """
    Launch the process and start processing the DAG.
    """
    self._process = DagFileProcessor._launch_process(
        self._result_queue,
        self.file_path,
        self._pickle_dags,

q266210 · DagFileProcessor.done (test, python)

def done(self):
    """
    Check if the process launched to process this file is done.

    :return: whether the process is finished running
    :rtype: bool
    """
    if self._process is None:
        raise AirflowException("Tried to see if it's done before starting!")

    if self._done:
        return True

    # In case result queue is corrupted.
    if self._result_queue and not self._result_queue.empty():
        self._result = self._result_queue.get_nowait()
        self._done = True
        self.log.debug("Waiting for %s", self._process)
        self._process.join()
        return True

q266211 · SchedulerJob._exit_gracefully (test, python)

def _exit_gracefully(self, signum, frame):
    """
    Helper method to clean up processor_agent to avoid leaving orphan processes.
    """
    self.log.info("Exiting

q266212 · SchedulerJob.update_import_errors (test, python)

def update_import_errors(session, dagbag):
    """
    For the DAGs in the given DagBag, record any associated import errors
    and clear errors for files that no longer have them. These are usually
    displayed through the Airflow UI so that users know that there are
    issues parsing DAGs.

    :param session: session for ORM operations
    :type session: sqlalchemy.orm.session.Session
    :param dagbag: DagBag containing DAGs with import errors
    :type dagbag: airflow.models.DagBag
    """
    # Clear the errors of the processed files
    for dagbag_file in dagbag.file_last_changed:

q266213 · SchedulerJob._process_task_instances (test, python)

def _process_task_instances(self, dag, queue, session=None):
    """
    This method schedules the tasks for a single DAG by looking at the
    active DAG runs and adding task instances that should run to the
    queue.
    """
    # update the state of the previously active dag runs
    dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
    active_dag_runs = []
    for run in dag_runs:
        self.log.info("Examining DAG run %s", run)
        # don't consider runs that are executed in the future
        if run.execution_date > timezone.utcnow():
            self.log.error(
                "Execution date is in future: %s",
                run.execution_date
            )
            continue

        if len(active_dag_runs) >= dag.max_active_runs:
            self.log.info("Number of active dag runs reached max_active_run.")
            break

        # skip backfill dagruns for now as long as they are not really scheduled

q266214 · SchedulerJob._change_state_for_tis_without_dagrun (test, python)

def _change_state_for_tis_without_dagrun(self, simple_dag_bag, old_states,
                                         new_state, session=None):
    """
    For all DAG IDs in the SimpleDagBag, look for task instances in the
    old_states and set them to new_state if the corresponding DagRun
    does not exist or exists but is not in the running state. This
    normally should not happen, but it can if the state of DagRuns is
    changed manually.

    :param old_states: examine TaskInstances in this state
    :type old_states: list[airflow.utils.state.State]
    :param new_state: set TaskInstances to this state
    :type new_state: airflow.utils.state.State
    :param simple_dag_bag: TaskInstances associated with DAGs in the
        simple_dag_bag and with states in old_states will be examined
    :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
    """

q266215 · SchedulerJob.__get_concurrency_maps (test, python)

def __get_concurrency_maps(self, states, session=None):
    """
    Get the concurrency maps.

    :param states: List of states to query for
    :type states: list[airflow.utils.state.State]
    :return: A map from (dag_id, task_id) to # of task instances and
        a map from (dag_id, task_id) to # of task instances in the given state list
    :rtype: dict[tuple[str, str], int]
    """
    TI = models.TaskInstance
    ti_concurrency_query = (
        session
        .query(TI.task_id, TI.dag_id, func.count('*'))

q266216 · SchedulerJob._change_state_for_executable_task_instances (test, python)

def _change_state_for_executable_task_instances(self, task_instances,
                                                acceptable_states, session=None):
    """
    Changes the state of task instances in the list with one of the given states
    to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.

    :param task_instances: TaskInstances to change the state of
    :type task_instances: list[airflow.models.TaskInstance]
    :param acceptable_states: Filters the TaskInstances updated to be in these states
    :type acceptable_states: Iterable[State]
    :rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
    """
    if len(task_instances) == 0:
        session.commit()
        return []

    TI = models.TaskInstance
    filter_for_ti_state_change = (
        [and_(
            TI.dag_id == ti.dag_id,
            TI.task_id == ti.task_id,
            TI.execution_date == ti.execution_date)
            for ti in task_instances])
    ti_query = (
        session
        .query(TI)
        .filter(or_(*filter_for_ti_state_change)))

    if None in acceptable_states:
        ti_query = ti_query.filter(
            or_(TI.state == None, TI.state.in_(acceptable_states))  # noqa: E711
        )
    else:
        ti_query = ti_query.filter(TI.state.in_(acceptable_states))

    tis_to_set_to_queued = (
        ti_query
        .with_for_update()
        .all())

    if len(tis_to_set_to_queued) == 0:
        self.log.info("No tasks were able to have their state changed to queued.")
        session.commit()
        return []

    # set TIs to queued state
    for task_instance in tis_to_set_to_queued:
        task_instance.state

q266217 · SchedulerJob._enqueue_task_instances_with_queued_state (test, python)

def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
                                              simple_task_instances):
    """
    Takes task_instances, which should have been set to queued, and enqueues them
    with the executor.

    :param simple_task_instances: TaskInstances to enqueue
    :type simple_task_instances: list[SimpleTaskInstance]
    :param simple_dag_bag: Should contain all of the task_instances' dags
    :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
    """
    TI = models.TaskInstance
    # actually enqueue them
    for simple_task_instance in simple_task_instances:
        simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
        command = TI.generate_command(
            simple_task_instance.dag_id,
            simple_task_instance.task_id,
            simple_task_instance.execution_date,
            local=True,
            mark_success=False,
            ignore_all_deps=False,
            ignore_depends_on_past=False,

q266218 · SchedulerJob._execute_task_instances (test, python)

def _execute_task_instances(self, simple_dag_bag, states, session=None):
    """
    Attempts to execute TaskInstances that should be executed by the scheduler.

    There are three steps:
    1. Pick TIs by priority with the constraint that they are in the expected
       states and that we do not exceed max_active_runs or pool limits.
    2. Change the state for the TIs above atomically.
    3. Enqueue the TIs in the executor.

    :param simple_dag_bag: TaskInstances associated with DAGs in the
        simple_dag_bag will be fetched from the DB and executed
    :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
    :param states: Execute TaskInstances in these states
    :type states: tuple[airflow.utils.state.State]
    :return: Number of task instances with state changed.
    """
    executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
                                                          session=session)

    def query(result, items):

q266219 · SchedulerJob._change_state_for_tasks_failed_to_execute (test, python)

def _change_state_for_tasks_failed_to_execute(self, session):
    """
    If there are tasks left over in the executor,
    we set them back to SCHEDULED to avoid creating hanging tasks.

    :param session: session for ORM operations
    """
    if self.executor.queued_tasks:
        TI = models.TaskInstance
        filter_for_ti_state_change = (
            [and_(
                TI.dag_id == dag_id,
                TI.task_id == task_id,
                TI.execution_date == execution_date,
                # The TI.try_number will return raw try_number+1 since the
                # ti is not running. And we need to -1 to match the DB record.
                TI._try_number == try_number - 1,
                TI.state == State.QUEUED)
                for dag_id, task_id, execution_date, try_number
                in self.executor.queued_tasks.keys()])
        ti_query = (session.query(TI)
                    .filter(or_(*filter_for_ti_state_change)))

q266220 · SchedulerJob._process_executor_events (test, python)

def _process_executor_events(self, simple_dag_bag, session=None):
    """
    Respond to executor events.
    """
    # TODO: this shares quite a lot of code with _manage_executor_state
    TI = models.TaskInstance
    for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
                               .items()):
        dag_id, task_id, execution_date, try_number = key
        self.log.info(
            "Executor reports execution of %s.%s execution_date=%s "
            "exited with status %s for try_number %s",
            dag_id, task_id, execution_date, state, try_number
        )
        if state == State.FAILED or state == State.SUCCESS:
            qry = session.query(TI).filter(TI.dag_id == dag_id,
                                           TI.task_id == task_id,
                                           TI.execution_date == execution_date)
            ti = qry.first()
            if not ti:
                self.log.warning("TaskInstance %s went missing from the database", ti)
                continue

            # TODO: should we fail RUNNING as well, as we do in Backfills?
            if ti.try_number == try_number and ti.state == State.QUEUED:
                msg = ("Executor reports task instance {} finished ({}) "
                       "although the task says it's {}. Was the task "
                       "killed externally?".format(ti, state, ti.state))
                self.log.error(msg)
                try:

q266221 · SchedulerJob.process_file (test, python)

def process_file(self, file_path, zombies, pickle_dags=False, session=None):
    """
    Process a Python file containing Airflow DAGs.

    This includes:
    1. Execute the file and look for DAG objects in the namespace.
    2. Pickle the DAG and save it to the DB (if necessary).
    3. For each DAG, see what tasks should run and create appropriate task
       instances in the DB.
    4. Record any errors importing the file into ORM
    5. Kill (in ORM) any task instances belonging to the DAGs that haven't
       issued a heartbeat in a while.

    Returns a list of SimpleDag objects that represent the DAGs found in
    the file

    :param file_path: the path to the Python file that should be executed
    :type file_path: unicode
    :param zombies: zombie task instances to kill.
    :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
    :param pickle_dags: whether to serialize the DAGs found in the file and
        save them to the db
    :type pickle_dags: bool
    :return: a list of SimpleDags made from the Dags found in the file
    :rtype: list[airflow.utils.dag_processing.SimpleDagBag]
    """
    self.log.info("Processing file %s for tasks to queue", file_path)
    # As DAGs are parsed from this file, they will be converted into SimpleDags
    simple_dags = []

    try:
        dagbag = models.DagBag(file_path, include_examples=False)
    except Exception:
        self.log.exception("Failed at reloading the DAG file %s", file_path)
        Stats.incr('dag_file_refresh_error', 1, 1)
        return []

    if len(dagbag.dags) > 0:
        self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
    else:
        self.log.warning("No viable dags retrieved from %s", file_path)
        self.update_import_errors(session, dagbag)
        return []

    # Save individual DAGs in the ORM and update DagModel.last_scheduled_time
    for dag in dagbag.dags.values():
        dag.sync_to_db()

    paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()

q266222 · BackfillJob._update_counters (test, python)

def _update_counters(self, ti_status):
    """
    Updates the counters per state of the tasks that were running.
    Can re-add tasks to run when required.

    :param ti_status: the internal status of the backfill job tasks
    :type ti_status: BackfillJob._DagRunTaskStatus
    """
    for key, ti in list(ti_status.running.items()):
        ti.refresh_from_db()
        if ti.state == State.SUCCESS:
            ti_status.succeeded.add(key)
            self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
            ti_status.running.pop(key)
            continue
        elif ti.state == State.SKIPPED:
            ti_status.skipped.add(key)
            self.log.debug("Task instance %s skipped. Don't rerun.", ti)
            ti_status.running.pop(key)
            continue
        elif ti.state == State.FAILED:
            self.log.error("Task instance %s failed", ti)
            ti_status.failed.add(key)

q266223 · BackfillJob._manage_executor_state (test, python)

def _manage_executor_state(self, running):
    """
    Checks if the executor agrees with the state of task instances
    that are running.

    :param running: dict of key, task to verify
    """
    executor = self.executor

    for key, state in list(executor.get_event_buffer().items()):
        if key not in running:
            self.log.warning(
                "%s state %s not in running=%s",

q266224 · BackfillJob._get_dag_run (test, python)

def _get_dag_run(self, run_date, session=None):
    """
    Returns a dag run for the given run date, which will be matched to an
    existing dag run if available, or created otherwise. If the
    max_active_runs limit is reached, this function will return None.

    :param run_date: the execution date for the dag run
    :type run_date: datetime.datetime
    :param session: the database session object
    :type session: sqlalchemy.orm.session.Session
    :return: a DagRun in state RUNNING or None
    """
    run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())

    # consider max_active_runs but ignore when running subdags
    respect_dag_max_active_limit = (True
                                    if (self.dag.schedule_interval and
                                        not self.dag.is_subdag)
                                    else False)

    current_active_dag_count = self.dag.get_num_active_runs(external_trigger=False)

    # check if we are scheduling on top of an already existing dag_run
    # we could find a "scheduled" run instead of a "backfill"
    run = DagRun.find(dag_id=self.dag.dag_id,
                      execution_date=run_date,
                      session=session)
    if run is not None and len(run) > 0:

q266225 · BackfillJob._task_instances_for_dag_run (test, python)

def _task_instances_for_dag_run(self, dag_run, session=None):
    """
    Returns a map of task instance key to task instance object for the tasks to
    run in the given dag run.

    :param dag_run: the dag run to get the tasks from
    :type dag_run: airflow.models.DagRun
    :param session: the database session object
    :type session: sqlalchemy.orm.session.Session
    """
    tasks_to_run = {}

    if dag_run is None:
        return tasks_to_run

    # check if we have orphaned tasks
    self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)

    # for some reason if we don't refresh the reference to run is lost
    dag_run.refresh_from_db()

q266226 · BackfillJob._execute_for_run_dates (test, python)

def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
                           start_date, session=None):
    """
    Computes the dag runs and their respective task instances for
    the given run dates and executes the task instances.
    Returns a list of execution dates of the dag runs that were executed.

    :param run_dates: Execution dates for dag runs
    :type run_dates: list
    :param ti_status: internal BackfillJob status structure used to track the
        tis and their progress
    :type ti_status: BackfillJob._DagRunTaskStatus
    :param executor: the executor to use, it must be previously started
    :type executor: BaseExecutor
    :param pickle_id: numeric id of the pickled dag, None if not pickled
    :type pickle_id: int
    :param start_date: backfill start date
    :type start_date: datetime.datetime
    :param session: the current session object
    :type session: sqlalchemy.orm.session.Session
    """
    for next_run_date in run_dates:
        dag_run = self._get_dag_run(next_run_date, session=session)
        tis_map = self._task_instances_for_dag_run(dag_run,

q266227 · BackfillJob._set_unfinished_dag_runs_to_failed (test, python)

def _set_unfinished_dag_runs_to_failed(self, dag_runs, session=None):
    """
    Go through the dag_runs and update the state based on the task_instance state.
    Then set DAG runs that are not finished to failed.

q266228 · BackfillJob._execute (test, python)

def _execute(self, session=None):
    """
    Initializes all components required to run a dag for a specified date range and
    calls helper method to execute the tasks.
    """
    ti_status = BackfillJob._DagRunTaskStatus()

    start_date = self.bf_start_date

    # Get intervals between the start/end dates, which will turn into dag runs
    run_dates = self.dag.get_run_dates(start_date=start_date,
                                       end_date=self.bf_end_date)
    if self.run_backwards:
        tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values()
                                     if t.depends_on_past]
        if tasks_that_depend_on_past:
            raise AirflowException(
                'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
                    ",".join(tasks_that_depend_on_past)))
        run_dates = run_dates[::-1]

    if len(run_dates) == 0:
        self.log.info("No run dates were found for the given dates and dag interval.")
        return

    # picklin'
    pickle_id = None
    if not self.donot_pickle and self.executor.__class__ not in (
            executors.LocalExecutor, executors.SequentialExecutor):
        pickle = DagPickle(self.dag)
        session.add(pickle)
        session.commit()
        pickle_id = pickle.id

    executor = self.executor
    executor.start()

    ti_status.total_runs = len(run_dates)  # total dag runs in backfill

    try:
        remaining_dates = ti_status.total_runs
        while remaining_dates > 0:
            dates_to_process = [run_date for run_date in run_dates
                                if run_date not in ti_status.executed_dag_run_dates]

            self._execute_for_run_dates(run_dates=dates_to_process,
                                        ti_status=ti_status,

q266229 · LocalTaskJob.heartbeat_callback (test, python)

def heartbeat_callback(self, session=None):
    """Self destruct task if state has been moved away from running externally"""

    if self.terminating:
        # ensure termination if processes are created later
        self.task_runner.terminate()
        return

    self.task_instance.refresh_from_db()
    ti = self.task_instance

    fqdn = get_hostname()
    same_hostname = fqdn == ti.hostname
    same_process = ti.pid == os.getpid()

    if ti.state == State.RUNNING:
        if not same_hostname:
            self.log.warning("The recorded hostname %s "
                             "does not match this instance's hostname "
                             "%s", ti.hostname, fqdn)
            raise AirflowException("Hostname of job runner does not match")
        elif not same_process:
            current_pid = os.getpid()

q266230 · CloudSpannerHook._get_client (test, python)

def _get_client(self, project_id):
    """
    Provides a client for interacting with the Cloud Spanner API.

    :param project_id: The ID of the GCP project.
    :type project_id: str
    :return: google.cloud.spanner_v1.client.Client
    :rtype: object
    """

q266231 · CloudSpannerHook.get_instance (test, python)

def get_instance(self, instance_id, project_id=None):
    """
    Gets information about a particular instance.

    :param project_id: Optional, the ID of the GCP project that owns the Cloud
        Spanner database. If set to None or missing, the default project_id
        from the GCP connection is used.
    :type project_id: str
    :param instance_id: The ID of the Cloud Spanner instance.

q266232 · CloudSpannerHook._apply_to_instance (test, python)

def _apply_to_instance(self, project_id, instance_id, configuration_name,
                       node_count, display_name, func):
    """
    Invokes a method on a given instance by applying a specified Callable.

    :param project_id: The ID of the GCP project that owns the Cloud Spanner
        database.
    :type project_id: str
    :param instance_id: The ID of the instance.
    :type instance_id: str
    :param configuration_name: Name of the instance configuration defining
        how the instance will be created. Required for instances which do not
        yet exist.
    :type configuration_name: str
    :param node_count: (Optional) Number of nodes allocated to the instance.
    :type node_count: int
    :param display_name: (Optional) The display name for the instance in the
        Cloud Console UI. (Must be between 4 and 30 characters.) If this value
        is not set in the constructor, will fall back to the instance ID.
    :type display_name: str
    :param func: Method of the instance to be called.
    :type func: Callable
    """
    # noinspection

q266233 · CloudSpannerHook.create_instance (test, python)

def create_instance(self, instance_id, configuration_name, node_count,
                    display_name, project_id=None):
    """
    Creates a new Cloud Spanner instance.

    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param configuration_name: The name of the instance configuration defining
        how the instance will be created. Possible configuration values can be
        retrieved via
        https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
    :type configuration_name: str
    :param node_count: (Optional) The number of nodes allocated to the Cloud
        Spanner instance.
    :type node_count: int
    :param display_name: (Optional) The display name for the instance in the GCP

q266234 · CloudSpannerHook.update_instance (test, python)

def update_instance(self, instance_id, configuration_name, node_count,
                    display_name, project_id=None):
    """
    Updates an existing Cloud Spanner instance.

    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param configuration_name: The name of the instance configuration defining
        how the instance will be created. Possible configuration values can be
        retrieved via
        https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
    :type configuration_name: str
    :param node_count: (Optional) The number of nodes allocated to the Cloud
        Spanner instance.
    :type node_count: int
    :param display_name: (Optional) The display name for the instance in the GCP

q266235 · CloudSpannerHook.delete_instance (test, python)

def delete_instance(self, instance_id, project_id=None):
    """
    Deletes an existing Cloud Spanner instance.

    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner

q266236 · CloudSpannerHook.get_database (test, python)

def get_database(self, instance_id, database_id, project_id=None):
    """
    Retrieves a database in Cloud Spanner. If the database does not exist
    in the specified instance, it returns None.

    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param database_id: The ID of the database in Cloud Spanner.
    :type database_id: str
    :param project_id: Optional, the ID of the GCP project that owns the Cloud
        Spanner database. If set to None or missing, the default project_id
        from the GCP connection is used.
    :type project_id: str
    :return: Database object or None if database does not exist
    :rtype: google.cloud.spanner_v1.database.Database or None

q266237 · CloudSpannerHook.create_database (test, python)

def create_database(self, instance_id, database_id, ddl_statements, project_id=None):
    """
    Creates a new database in Cloud Spanner.

    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param database_id: The ID of the database to create in Cloud Spanner.
    :type database_id: str
    :param ddl_statements: The string list containing DDL for the new database.
    :type ddl_statements: list[str]
    :param project_id: Optional, the ID of the GCP project that owns the Cloud
        Spanner database. If set to None or missing, the default project_id
        from the GCP connection is used.
    :type project_id: str
    :return: None
    """
    instance = self._get_client(project_id=project_id).instance(
        instance_id=instance_id)
    if not instance.exists():
        raise AirflowException("The instance {} does not exist in project {} !".
                               format(instance_id, project_id))

q266238 · CloudSpannerHook.update_database (test, python)

def update_database(self, instance_id, database_id, ddl_statements,
                    project_id=None, operation_id=None):
    """
    Updates DDL of a database in Cloud Spanner.

    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param database_id: The ID of the database in Cloud Spanner.
    :type database_id: str
    :param ddl_statements: The string list containing DDL for the new database.
    :type ddl_statements: list[str]
    :param project_id: Optional, the ID of the GCP project that owns the Cloud
        Spanner database. If set to None or missing, the default project_id
        from the GCP connection is used.
    :type project_id: str
    :param operation_id: (Optional) The unique per database operation ID that
        can be specified to implement idempotency check.
    :type operation_id: str
    :return: None
    """
    instance = self._get_client(project_id=project_id).instance(
        instance_id=instance_id)
    if not instance.exists():
        raise AirflowException("The instance {} does not exist in project {} !".

q266239 · CloudSpannerHook.delete_database (test, python)

def delete_database(self, instance_id, database_id, project_id=None):
    """
    Drops a database in Cloud Spanner.

    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param database_id: The ID of the database in Cloud Spanner.
    :type database_id: str
    :param project_id: Optional, the ID of the GCP project that owns the Cloud
        Spanner database. If set to None or missing, the default project_id
        from the GCP connection is used.
    :type project_id: str
    :return: True if everything succeeded
    :rtype: bool

q266240 · ImapAttachmentSensor.poke (test, python)

def poke(self, context):
    """
    Pokes for a mail attachment on the mail server.

    :param context: The context that is being provided when poking.
    :type context: dict
    :return: True if attachment with the given name is present and False if not.
    :rtype: bool
    """
    self.log.info('Poking for %s', self.attachment_name)

q266241 · prepare_additional_parameters (test, python)

def prepare_additional_parameters(additional_properties, language_hints,
                                  web_detection_params):
    """
    Creates additional_properties parameter based on language_hints,
    web_detection_params and additional_properties parameters specified by the user
    """
    if language_hints is None and web_detection_params is None:
        return additional_properties

    if additional_properties is None:
        return {}

    merged_additional_parameters = deepcopy(additional_properties)

    if 'image_context' not in merged_additional_parameters:

q266242 · CassandraHook.get_conn (test, python)

def get_conn(self):
    """
    Returns a cassandra Session object
    """
    if self.session and not self.session.is_shutdown:
        return self.session

q266243 · CassandraHook.table_exists (test, python)

def table_exists(self, table):
    """
    Checks if a table exists in Cassandra

    :param table: Target Cassandra table.
        Use dot notation to target a specific keyspace.
    :type table: str
    """
    keyspace = self.keyspace
    if '.' in table:

q266244 · CassandraHook.record_exists (test, python)

def record_exists(self, table, keys):
    """
    Checks if a record exists in Cassandra

    :param table: Target Cassandra table.
        Use dot notation to target a specific keyspace.
    :type table: str
    :param keys: The keys and their values to check the existence.
    :type keys: dict
    """
    keyspace = self.keyspace
    if '.' in table:
        keyspace, table = table.split('.', 1)
    ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys())

q266245 · SparkSubmitHook._build_track_driver_status_command (test, python)

def _build_track_driver_status_command(self):
    """
    Construct the command to poll the driver status.

    :return: full command to be executed
    """
    connection_cmd = self._get_spark_binary_path()

    # The url of the spark master
    connection_cmd += ["--master", self._connection['master']]

q266246 · SparkSubmitHook.submit (test, python)

def submit(self, application="", **kwargs):
    """
    Remote Popen to execute the spark-submit job

    :param application: Submitted application, jar or py file
    :type application: str
    :param kwargs: extra arguments to Popen (see subprocess.Popen)
    """
    spark_submit_cmd = self._build_spark_submit_command(application)

    if hasattr(self, '_env'):
        env = os.environ.copy()
        env.update(self._env)
        kwargs["env"] = env

    self._submit_sp = subprocess.Popen(spark_submit_cmd,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT,
                                       bufsize=-1,
                                       universal_newlines=True,
                                       **kwargs)

    self._process_spark_submit_log(iter(self._submit_sp.stdout.readline, ''))
    returncode = self._submit_sp.wait()

    # Check spark-submit return code. In Kubernetes mode, also check the value
    # of exit code in the log, as it may differ.
    if returncode or (self._is_kubernetes and self._spark_exit_code != 0):
        raise AirflowException(
            "Cannot execute: {}. Error code is: {}.".format(
                spark_submit_cmd, returncode
            )

q266247 · SparkSubmitHook._process_spark_submit_log (test, python)

def _process_spark_submit_log(self, itr):
    """
    Processes the log files and extracts useful information out of it.

    If the deploy-mode is 'client', log the output of the submit command as those
    are the output logs of the Spark worker directly.

    Remark: If the driver needs to be tracked for its status, the log-level of the
    spark deploy needs to be at least INFO
    (log4j.logger.org.apache.spark.deploy=INFO)

    :param itr: An iterator which iterates over the input of the subprocess
    """
    # Consume the iterator
    for line in itr:
        line = line.strip()
        # If we run yarn cluster mode, we want to extract the application id from
        # the logs so we can kill the application when we stop it unexpectedly
        if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
            match = re.search('(application[0-9_]+)', line)
            if match:
                self._yarn_application_id = match.groups()[0]
                self.log.info("Identified spark driver id: %s",
                              self._yarn_application_id)

        # If we run Kubernetes cluster mode, we want to extract the driver pod id
        # from the logs so we can kill the application when we stop it unexpectedly
        elif self._is_kubernetes:
            match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line)
            if match:
                self._kubernetes_driver_pod = match.groups()[0]
                self.log.info("Identified spark driver pod: %s",
                              self._kubernetes_driver_pod)

        # Store the Spark Exit code
        match_exit_code = re.search(r'\s*Exit code: (\d+)', line)

q266248 · SparkSubmitHook._process_spark_status_log (test, python)

def _process_spark_status_log(self, itr):
    """
    Parses the logs of the spark driver status query process.

    :param itr: An iterator which iterates over the input of the subprocess
    """
    # Consume the iterator
    for line in itr:
        line = line.strip()

        # Check if the log line is about the driver status and extract the status.
        if "driverState" in line:

q266249 · get_task_runner (test, python)

def get_task_runner(local_task_job):
    """
    Get the task runner that can be used to run the given job.

    :param local_task_job: The LocalTaskJob associated with the TaskInstance
        that needs to be executed.
    :type local_task_job: airflow.jobs.LocalTaskJob
    :return: The task runner to use to run the task.
    :rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
    """
    if _TASK_RUNNER == "StandardTaskRunner":
        return StandardTaskRunner(local_task_job)

q266250 · AWSBatchOperator._wait_for_task_ended (test, python)

def _wait_for_task_ended(self):
    """
    Try to use a waiter from the below pull request

    * https://github.com/boto/botocore/pull/1307

    If the waiter is not available apply an exponential backoff

    * docs.aws.amazon.com/general/latest/gr/api-retries.html
    """
    try:
        waiter = self.client.get_waiter('job_execution_complete')
        waiter.config.max_attempts = sys.maxsize  # timeout is managed by airflow
        waiter.wait(jobs=[self.jobId])
    except ValueError:
        # If waiter not available use expo
        retry = True

q266251 · MySqlToGoogleCloudStorageOperator._query_mysql (test, python)

def _query_mysql(self):
    """
    Queries mysql and returns a cursor to the results.
    """

q266252 · MySqlToGoogleCloudStorageOperator._configure_csv_file (test, python)

def _configure_csv_file(self, file_handle, schema):
    """Configure a csv writer with the file_handle and write schema
    as headers for the new file.
    """
    csv_writer = csv.writer(file_handle, encoding='utf-8',

q266253 · MySqlToGoogleCloudStorageOperator._write_local_schema_file (test, python)

def _write_local_schema_file(self, cursor):
    """
    Takes a cursor, and writes the BigQuery schema in .json format for the
    results to a local file system.

    :return: A dictionary where key is a filename to be used as an object
        name in GCS, and values are file handles to local files that
        contains the BigQuery schema fields in .json format.
    """
    schema_str = None
    schema_file_mime_type = 'application/json'
    tmp_schema_file_handle = NamedTemporaryFile(delete=True)
    if self.schema is not None and isinstance(self.schema, string_types):
        schema_str = self.schema.encode('utf-8')
    elif self.schema is not None and isinstance(self.schema, list):
        schema_str = json.dumps(self.schema).encode('utf-8')
    else:
        schema = []
        for field in cursor.description:
            # See PEP 249 for details about the description tuple.
            field_name = field[0]
            field_type = self.type_map(field[1])
            # Always allow TIMESTAMP to be nullable. MySQLdb returns None types
            # for required fields because some MySQL timestamps can't be

q266254 · MySqlToGoogleCloudStorageOperator._get_col_type_dict (test, python)

def _get_col_type_dict(self):
    """
    Return a dict of column name and column type based on self.schema if not None.
    """
    schema = []
    if isinstance(self.schema, string_types):
        schema = json.loads(self.schema)
    elif isinstance(self.schema, list):
        schema = self.schema
    elif self.schema is not None:
        self.log.warn('Using default schema due to unexpected type. '
                      'Should be a string or list.')

    col_type_dict = {}

q266255 · MySqlToGoogleCloudStorageOperator.type_map (test, python)

def type_map(cls, mysql_type):
    """
    Helper function that maps from MySQL fields to BigQuery fields. Used
    when a schema_filename is set.
    """
    d = {
        FIELD_TYPE.INT24: 'INTEGER',
        FIELD_TYPE.TINY: 'INTEGER',
        FIELD_TYPE.BIT: 'INTEGER',
        FIELD_TYPE.DATETIME: 'TIMESTAMP',
        FIELD_TYPE.DATE: 'TIMESTAMP',
        FIELD_TYPE.DECIMAL: 'FLOAT',
        FIELD_TYPE.NEWDECIMAL: 'FLOAT',
        FIELD_TYPE.DOUBLE: 'FLOAT',
        FIELD_TYPE.FLOAT: 'FLOAT',

q266256 · SqoopOperator.execute (test, python)

def execute(self, context):
    """
    Execute sqoop job
    """
    self.hook = SqoopHook(
        conn_id=self.conn_id,
        verbose=self.verbose,
        num_mappers=self.num_mappers,
        hcatalog_database=self.hcatalog_database,
        hcatalog_table=self.hcatalog_table,
        properties=self.properties
    )

    if self.cmd_type == 'export':
        self.hook.export_table(
            table=self.table,
            export_dir=self.export_dir,
            input_null_string=self.input_null_string,
            input_null_non_string=self.input_null_non_string,
            staging_table=self.staging_table,
            clear_staging_table=self.clear_staging_table,
            enclosed_by=self.enclosed_by,
            escaped_by=self.escaped_by,
            input_fields_terminated_by=self.input_fields_terminated_by,
            input_lines_terminated_by=self.input_lines_terminated_by,
            input_optionally_enclosed_by=self.input_optionally_enclosed_by,
            batch=self.batch,
            relaxed_isolation=self.relaxed_isolation,
            extra_export_options=self.extra_export_options)
    elif self.cmd_type == 'import':
        # add create hcatalog table to extra import options if option passed
        # if new params are added to constructor can pass them in here
        # so don't modify sqoop_hook for each param
        if self.create_hcatalog_table:
            self.extra_import_options['create-hcatalog-table'] = ''

        if self.table and self.query:
            raise AirflowException(
                'Cannot specify query and table together. Need to specify either or.'
            )

        if self.table:
            self.hook.import_table(
                table=self.table,
                target_dir=self.target_dir,
                append=self.append,

q266257 · apply_lineage (test, python)

def apply_lineage(func):
    """
    Saves the lineage to XCom and if configured to do so sends it
    to the backend.
    """
    backend = _get_backend()

    @wraps(func)
    def wrapper(self, context, *args, **kwargs):
        self.log.debug("Backend: %s, Lineage called with inlets: %s, outlets: %s",
                       backend, self.inlets, self.outlets)

        ret_val = func(self, context, *args, **kwargs)

        outlets = [x.as_dict() for x in self.outlets]
        inlets = [x.as_dict() for x in self.inlets]

        if len(self.outlets) > 0:
            self.xcom_push(context,
                           key=PIPELINE_OUTLETS,
                           value=outlets,
                           execution_date=context['ti'].execution_date)

        if len(self.inlets) > 0:

q266258 · Connection.extra_dejson (test, python)

def extra_dejson(self):
    """Returns the extra property by deserializing json."""
    obj = {}
    if self.extra:
        try:
            obj = json.loads(self.extra)
        except Exception as e:

q266259 · date_range (test, python)

def date_range(start_date, end_date=None, num=None, delta=None):
    """
    Get a set of dates as a list based on a start, end and delta; delta
    can be something that can be added to `datetime.datetime`
    or a cron expression as a `str`

    :Example::

        date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
        [datetime.datetime(2016, 1, 1, 0, 0),
         datetime.datetime(2016, 1, 2, 0, 0),
         datetime.datetime(2016, 1, 3, 0, 0)]
        date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')
        [datetime.datetime(2016, 1, 1, 0, 0),
         datetime.datetime(2016, 1, 2, 0, 0),
         datetime.datetime(2016, 1, 3, 0, 0)]
        date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
        [datetime.datetime(2016, 1, 1, 0, 0),
         datetime.datetime(2016, 2, 1, 0, 0),
         datetime.datetime(2016, 3, 1, 0, 0)]

    :param start_date: anchor date to start the series from
    :type start_date: datetime.datetime
    :param end_date: right boundary for the date range
    :type end_date: datetime.datetime
    :param num: alternatively to end_date, you can specify the number of
        entries you want in the range. This number can be negative;
        output will always be sorted regardless.
    :type num: int
    """
    if not delta:
        return []
    if end_date and start_date > end_date:
        raise Exception("Wait. start_date needs to be before end_date")
    if end_date and num:
        raise Exception("Wait. Either specify end_date OR num")
    if not end_date and not num:
        end_date = timezone.utcnow()

    delta_iscron = False
    tz = start_date.tzinfo
    if isinstance(delta, six.string_types):

q266260 · scale_time_units (test, python)

def scale_time_units(time_seconds_arr, unit):
    """
    Convert an array of time durations in seconds to the specified time unit.
    """
    if unit == 'minutes':
        return list(map(lambda x: x

q266261 · days_ago (test, python)

def days_ago(n, hour=0, minute=0, second=0, microsecond=0):
    """
    Get a datetime object representing `n` days ago. By default the time is
    set to midnight.
    """
    today = timezone.utcnow().replace(
        hour=hour,

q266262 · AirflowSecurityManager.init_role (test, python)

def init_role(self, role_name, role_vms, role_perms):
    """
    Initialize the role with the permissions and related view-menus.

    :param role_name:
    :param role_vms:
    :param role_perms:
    :return:
    """
    pvms = self.get_session.query(sqla_models.PermissionView).all()
    pvms = [p for p in pvms if p.permission and p.view_menu]

    role = self.find_role(role_name)
    if not role:
        role = self.add_role(role_name)

    if len(role.permissions) == 0:
        self.log.info('Initializing permissions for role:%s in the database.',
                      role_name)
        role_pvms = set()

q266263 · AirflowSecurityManager.delete_role (test, python)

def delete_role(self, role_name):
    """Delete the given Role

    :param role_name: the name of a role in the ab_role table
    """
    session = self.get_session
    role = session.query(sqla_models.Role)\
        .filter(sqla_models.Role.name == role_name)\
        .first()
    if role:
        self.log.info("Deleting role '%s'", role_name)

q266264 · AirflowSecurityManager.get_user_roles (test, python)

def get_user_roles(self, user=None):
    """
    Get all the roles associated with the user.

    :param user: the ab_user in FAB model.
    :return: a list of roles associated with the user.
    """
    if user is None:
        user = g.user

q266265 · AirflowSecurityManager.get_all_permissions_views (test, python)

def get_all_permissions_views(self):
    """
    Returns a set of tuples with the perm name and view menu name
    """
    perms_views = set()
    for role in self.get_user_roles():

q266266 · AirflowSecurityManager._has_role (test, python)

def _has_role(self, role_name_or_list):
    """
    Whether the user has this role name
    """
    if not isinstance(role_name_or_list, list):
        role_name_or_list = [role_name_or_list]

q266267 · AirflowSecurityManager._has_perm (test, python)

def _has_perm(self, permission_name, view_menu_name):
    """
    Whether the user has this perm
    """
    if hasattr(self, 'perms'):
        if (permission_name, view_menu_name) in self.perms:
            return True

q266268 · AirflowSecurityManager.clean_perms (test, python)

def clean_perms(self):
    """
    FAB leaves faulty permissions that need to be cleaned up
    """
    self.log.debug('Cleaning faulty perms')
    sesh = self.get_session
    pvms = (
        sesh.query(sqla_models.PermissionView)
        .filter(or_(
            sqla_models.PermissionView.permission == None,  # NOQA

q266269 · AirflowSecurityManager._merge_perm (test, python)

def _merge_perm(self, permission_name, view_menu_name):
    """
    Add the new (permission, view_menu) to ab_permission_view_role if it does
    not exist. It will add the related entry to the ab_permission and
    ab_view_menu meta tables as well.

    :param permission_name: Name of the permission.
    :type permission_name: str
    :param view_menu_name: Name of the view-menu
    :type view_menu_name: str
    :return:
    """
    permission = self.find_permission(permission_name)
    view_menu = self.find_view_menu(view_menu_name)
    pv = None

q266270 · AirflowSecurityManager.update_admin_perm_view (test, python)

def update_admin_perm_view(self):
    """
    Admin should have all the permission-views. Add the missing ones
    to the table for admin.

    :return: None.
    """

q266271 · AirflowSecurityManager._sync_dag_view_permissions (test, python)

def _sync_dag_view_permissions(self, dag_id, access_control):
    """Set the access policy on the given DAG's ViewModel.

    :param dag_id: the ID of the DAG whose permissions should be updated
    :type dag_id: string
    :param access_control: a dict where each key is a rolename and
        each value is a set() of permission names (e.g., {'can_dag_read'})
    :type access_control: dict
    """
    def _get_or_create_dag_permission(perm_name):
        dag_perm = self.find_permission_view_menu(perm_name, dag_id)
        if not dag_perm:
            self.log.info(
                "Creating new permission '%s' on view '%s'",
                perm_name, dag_id
            )
            dag_perm = self.add_permission_view_menu(perm_name, dag_id)
        return dag_perm

    def _revoke_stale_permissions(dag_view):
        existing_dag_perms = self.find_permissions_view_menu(dag_view)
        for perm in existing_dag_perms:
            non_admin_roles = [role for role in perm.role
                               if role.name != 'Admin']
            for role in non_admin_roles:
                target_perms_for_role = access_control.get(role.name, {})
                if perm.permission.name not in target_perms_for_role:
                    self.log.info(
                        "Revoking '%s' on DAG '%s' for role '%s'",
                        perm.permission, dag_id, role.name
                    )
                    self.del_permission_role(role, perm)

    dag_view = self.find_view_menu(dag_id)
    if dag_view:
        _revoke_stale_permissions(dag_view)

    for rolename, perms in access_control.items():

q266272 · AirflowSecurityManager.create_perm_vm_for_all_dag (test, python)

def create_perm_vm_for_all_dag(self):
    """
    Create perm-vm if it does not exist and insert into FAB security model
    for all-dags.
    """
    # create perm for global logical dag
    for dag_vm in self.DAG_VMS:
        for perm

q266273 · get_fernet (test, python)

def get_fernet():
    """
    Deferred load of Fernet key.

    This function could fail either because Cryptography is not installed
    or because the Fernet key is invalid.

    :return: Fernet object
    :raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
    """
    global _fernet
    log = LoggingMixin().log

    if _fernet:
        return _fernet
    try:
        from cryptography.fernet import Fernet, MultiFernet, InvalidToken
        global InvalidFernetToken
        InvalidFernetToken = InvalidToken
    except BuiltinImportError:

q266274 · AwsGlueCatalogPartitionSensor.poke (test, python)

def poke(self, context):
    """
    Checks for existence of the partition in the AWS Glue Catalog table
    """
    if '.' in self.table_name:
        self.database_name, self.table_name = self.table_name.split('.')

q266275 · AwsGlueCatalogPartitionSensor.get_hook (test, python)

def get_hook(self):
    """
    Gets the AwsGlueCatalogHook
    """
    if not hasattr(self, 'hook'):
        from airflow.contrib.hooks.aws_glue_catalog_hook import AwsGlueCatalogHook
        self.hook = AwsGlueCatalogHook(

q266276 · SQSSensor.poke (test, python)

def poke(self, context):
    """
    Check for message on subscribed queue and write to xcom the message with key
    ``messages``

    :param context: the context object
    :type context: dict
    :return: ``True`` if message is available or ``False``
    """
    sqs_hook = SQSHook(aws_conn_id=self.aws_conn_id)
    sqs_conn = sqs_hook.get_conn()

    self.log.info('SQSSensor checking for message on queue: %s', self.sqs_queue)

    messages = sqs_conn.receive_message(QueueUrl=self.sqs_queue,
                                        MaxNumberOfMessages=self.max_messages,

q266277 · HDFSHook.get_conn (test, python)

def get_conn(self):
    """
    Returns a snakebite HDFSClient object.
    """
    # When using HAClient, proxy_user must be the same, so is ok to always
    # take the first.
    effective_user = self.proxy_user
    autoconfig = self.autoconfig
    use_sasl = configuration.conf.get('core', 'security') == 'kerberos'

    try:
        connections = self.get_connections(self.hdfs_conn_id)

        if not effective_user:
            effective_user = connections[0].login
        if not autoconfig:
            autoconfig = connections[0].extra_dejson.get('autoconfig', False)
        hdfs_namenode_principal = connections[0].extra_dejson.get(
            'hdfs_namenode_principal')
    except AirflowException:
        if not autoconfig:
            raise

    if autoconfig:
        # will read config info from $HADOOP_HOME conf files
        client = AutoConfigClient(effective_user=effective_user,
                                  use_sasl=use_sasl)

q266278 · WebHDFSHook.get_conn (test, python)

def get_conn(self):
    """
    Establishes a connection depending on the security mode set via config or
    environment variable.

    :return: a hdfscli InsecureClient or KerberosClient object.
    :rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient
    """
    connections = self.get_connections(self.webhdfs_conn_id)

    for connection in connections:
        try:
            self.log.debug('Trying namenode %s', connection.host)
            client = self._get_client(connection)
            client.status('/')
            self.log.debug('Using namenode %s for hook', connection.host)

q266279 · WebHDFSHook.check_for_path (test, python)

def check_for_path(self, hdfs_path):
    """
    Check for the existence of a path in HDFS by querying FileStatus.

    :param hdfs_path: The path to check.
    :type hdfs_path: str
    :return: True if the path exists and False if not.

q266280 · WebHDFSHook.load_file (test, python)

def load_file(self, source, destination, overwrite=True, parallelism=1, **kwargs):
    r"""
    Uploads a file to HDFS.

    :param source: Local path to file or folder.
        If it's a folder, all the files inside of it will be uploaded.

        .. note:: This implies that folders empty of files will not be created
            remotely.

    :type source: str
    :param destination: Target HDFS path.
        If it already exists and is a directory, files will be uploaded inside.
    :type destination: str
    :param overwrite: Overwrite any existing file or directory.
    :type overwrite: bool
    :param parallelism: Number of threads to use for parallelization.
        A value of `0`

q266281 · PinotDbApiHook.get_conn (test, python)

def get_conn(self):
    """
    Establish a connection to pinot broker through pinot dbapi.
    """
    conn = self.get_connection(self.pinot_broker_conn_id)
    pinot_broker_conn = connect(
        host=conn.host,
        port=conn.port,
        path=conn.extra_dejson.get('endpoint', '/pql'),

q266282 · PinotDbApiHook.get_uri (test, python)

def get_uri(self):
    """
    Get the connection uri for pinot broker.

    e.g: http://localhost:9000/pql
    """
    conn = self.get_connection(getattr(self, self.conn_name_attr))
    host = conn.host
    if conn.port is not None:
        host += ':{port}'.format(port=conn.port)

q266283 · TransferJobPreprocessor._convert_date_to_dict (test, python)

def _convert_date_to_dict(field_date):
    """
    Convert native python ``datetime.date`` object to a format supported by the API
    """

q266284 · TransferJobPreprocessor._convert_time_to_dict (test, python)

def _convert_time_to_dict(time):
    """
    Convert native python ``datetime.time`` object to a format supported by the API
    """

q266285 · RedisHook.get_conn (test, python)

def get_conn(self):
    """
    Returns a Redis connection.
    """
    conn = self.get_connection(self.redis_conn_id)
    self.host = conn.host
    self.port = conn.port
    self.password = None if str(conn.password).lower() in ['none', 'false', ''] \
        else conn.password
    self.db = conn.extra_dejson.get('db', None)

    if not self.redis:
        self.log.debug(
            'Initializing redis object for conn_id "%s" on %s:%s:%s',

q266286 · DbApiHook.get_pandas_df (test, python)

def get_pandas_df(self, sql, parameters=None):
    """
    Executes the sql and returns a pandas dataframe

    :param sql: the sql statement to be executed (str) or a list of
        sql statements to execute
    :type sql: str or list
    :param parameters: The parameters to render the SQL query with.

q266287 · DbApiHook.run (test, python)

def run(self, sql, autocommit=False, parameters=None):
    """
    Runs a command or a list of commands. Pass a list of sql
    statements to the sql parameter to get them to execute
    sequentially

    :param sql: the sql statement to be executed (str) or a list of
        sql statements to execute
    :type sql: str or list
    :param autocommit: What to set the connection's autocommit setting to
        before executing the query.
    :type autocommit: bool
    :param parameters: The parameters to render the SQL query with.
    :type parameters: mapping or iterable
    """
    if isinstance(sql, basestring):
        sql = [sql]

    with closing(self.get_conn()) as conn:
        if self.supports_autocommit:
            self.set_autocommit(conn, autocommit)

        with closing(conn.cursor()) as cur:
            for s in sql:

q266288 · DbApiHook.set_autocommit (test, python)

def set_autocommit(self, conn, autocommit):
    """
    Sets the autocommit flag on the connection
    """
    if not self.supports_autocommit and autocommit:
        self.log.warn(
            ("%s connection doesn't support "

q266289 · DbApiHook.insert_rows (test, python)

def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
                replace=False):
    """
    A generic way to insert a set of tuples into a table;
    a new transaction is created every commit_every rows

    :param table: Name of the target table
    :type table: str
    :param rows: The rows to insert into the table
    :type rows: iterable of tuples
    :param target_fields: The names of the columns to fill in the table
    :type target_fields: iterable of strings
    :param commit_every: The maximum number of rows to insert in one
        transaction. Set to 0 to insert all rows in one transaction.
    :type commit_every: int
    :param replace: Whether to replace instead of insert
    :type replace: bool
    """
    if target_fields:
        target_fields = ", ".join(target_fields)
        target_fields = "({})".format(target_fields)
    else:
        target_fields = ''
    i = 0
    with closing(self.get_conn()) as conn:
        if self.supports_autocommit:
            self.set_autocommit(conn, False)

        conn.commit()

        with closing(conn.cursor()) as cur:
            for i, row in enumerate(rows, 1):
                lst = []
                for cell in row:
                    lst.append(self._serialize_cell(cell, conn))
                values = tuple(lst)
                placeholders = ["%s", ] * len(values)
                if not replace:

q266290 · DbApiHook._serialize_cell (test, python)

def _serialize_cell(cell, conn=None):
    """
    Returns the SQL literal of the cell as a string.

    :param cell: The cell to insert into the table
    :type cell: object
    :param conn: The database connection
    :type conn: connection object
    :return: The serialized cell

q266291 · Airflow.health (test, python)

def health(self, session=None):
    """
    An endpoint helping check the health status of the Airflow instance,
    including metadatabase and scheduler.
    """
    BJ = jobs.BaseJob
    payload = {}
    scheduler_health_check_threshold = timedelta(
        seconds=conf.getint('scheduler', 'scheduler_health_check_threshold'))

    latest_scheduler_heartbeat = None
    payload['metadatabase'] = {'status': 'healthy'}
    try:
        latest_scheduler_heartbeat = session.query(func.max(BJ.latest_heartbeat)).\
            filter(BJ.state == 'running',
                   BJ.job_type == 'SchedulerJob').\
            scalar()

q266292 · Airflow.extra_links (test, python)

def extra_links(self):
    """
    A restful endpoint that returns external links for a given Operator

    It queries the operator that sent the request for the links it wishes
    to provide for a given external link name.

    API: GET
    Args: dag_id: The id of the dag containing the task in question
          task_id: The id of the task in question
          execution_date: The date of execution of the task
          link_name: The name of the link reference to find the actual URL for

    Returns:
        200: {url: <url of link>, error: None} - returned when there was no
            problem finding the URL
        404: {url: None, error: <error message>} - returned when the operator
            does not return a URL
    """
    dag_id = request.args.get('dag_id')
    task_id = request.args.get('task_id')
    execution_date = request.args.get('execution_date')
    link_name = request.args.get('link_name')
    dttm = airflow.utils.timezone.parse(execution_date)
    dag = dagbag.get_dag(dag_id)

    if not dag or task_id not in dag.task_ids:
        response = jsonify(
            {'url': None,
             'error': "can't find dag {dag} or task_id {task_id}".format(

q266293 · CloudantHook.get_conn (test, python)

def get_conn(self):
    """
    Opens a connection to the cloudant service and closes it automatically if
    used as context manager.

    .. note::
        In the connection form:
        - 'host' equals the 'Account' (optional)
        - 'login' equals the 'Username (or API Key)' (required)

q266294 · SlackWebhookOperator.execute (test, python)

def execute(self, context):
    """
    Call the SlackWebhookHook to post the provided Slack message
    """
    self.hook = SlackWebhookHook(
        self.http_conn_id,

q266295 · GoogleCloudBaseHook._get_credentials (test, python)

def _get_credentials(self):
    """
    Returns the Credentials object for Google API
    """
    key_path = self._get_field('key_path', False)
    keyfile_dict = self._get_field('keyfile_dict', False)
    scope = self._get_field('scope', None)
    if scope:
        scopes = [s.strip() for s in scope.split(',')]
    else:
        scopes = _DEFAULT_SCOPES

    if not key_path and not keyfile_dict:
        self.log.info('Getting connection using `google.auth.default()` '
                      'since no key file is defined for hook.')
        credentials, _ = google.auth.default(scopes=scopes)
    elif key_path:
        # Get credentials from a JSON file.
        if key_path.endswith('.json'):
            self.log.debug('Getting connection using JSON key file %s' % key_path)
            credentials = (

q266296 · GoogleCloudBaseHook._authorize (test, python)

def _authorize(self):
    """
    Returns an authorized HTTP object to be used to build a Google cloud
    service hook connection.
    """
    credentials = self._get_credentials()
    http = httplib2.Http()

q266297 · GoogleCloudBaseHook.catch_http_exception (test, python)

def catch_http_exception(func):
    """
    Function decorator that intercepts HTTP Errors and raises AirflowException
    with a more informative message.
    """
    @functools.wraps(func)
    def wrapper_decorator(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except GoogleAPICallError as e:
            if isinstance(e, AlreadyExists):
                raise e

q266298 · GoogleCloudBaseHook.fallback_to_default_project_id (test, python)

def fallback_to_default_project_id(func):
    """
    Decorator that provides fallback for Google Cloud Platform project id. If
    the project is None it will be replaced with the project_id from the
    service account the Hook is authenticated with. Project id can be specified
    either via project_id kwarg or via first parameter in positional args.

    :param func: function to wrap
    :return: result of the function call
    """
    @functools.wraps(func)
    def inner_wrapper(self, *args, **kwargs):
        if len(args) > 0:
            raise AirflowException(
                "You must use keyword arguments in this method rather than"
                " positional")
        if 'project_id' in kwargs:
            kwargs['project_id'] = self._get_project_id(kwargs['project_id'])
        else:

q266299 · State.unfinished (test, python)

def unfinished(cls):
    """
    A list of states indicating that a task either has not completed
    a run or has not even started.