Dataset columns:
  _id               string (length 2 to 7)
  title             string (length 1 to 88)
  partition         string (3 distinct values)
  text              string (length 31 to 13.1k)
  language          string (1 distinct value)
  meta_information  dict
q257200
_Pipelines.build_pipeline_args
validation
def build_pipeline_args(cls, project, script, job_params, task_params, reserved_labels, preemptible, logging_uri, scopes, keep_alive): """Builds pipeline args for execution. Args: project: string name of project. script: Body of the script to execute. job_params: dictionary of values for labels, envs, inputs, and outputs for this job. task_params: dictionary of values for labels, envs, inputs, and outputs for this task. reserved_labels: dictionary of reserved labels (e.g. task-id, task-attempt) preemptible: use a preemptible VM for the job logging_uri: path for job logging output. scopes: list of scope. keep_alive: Seconds to keep VM alive on failure Returns: A nested dictionary with one entry under the key pipelineArgs containing the pipeline arguments. """ # For the Pipelines API, envs and file inputs are all "inputs". inputs = {} inputs.update({SCRIPT_VARNAME: script}) inputs.update({ var.name: var.value for var in job_params['envs'] | task_params['envs'] if var.value }) inputs.update({ var.name: var.uri for var in job_params['inputs'] | task_params['inputs'] if not var.recursive and var.value }) # Remove wildcard references for non-recursive output. When the pipelines # controller generates a delocalize call, it must point to a bare directory # for patterns. The output param OUTFILE=gs://bucket/path/*.bam should # delocalize with a call similar to: # gsutil cp /mnt/data/output/gs/bucket/path/*.bam gs://bucket/path/ outputs = {} for var in job_params['outputs'] | task_params['outputs']:
python
{ "resource": "" }
q257201
_Operations._datetime_to_utc_int
validation
def _datetime_to_utc_int(date):
  """Convert a localized datetime into an integer UTC time value."""
  if date is None:
    return None
  # Convert localized datetime to a UTC integer
python
{ "resource": "" }
q257202
GoogleJobProvider._build_pipeline_request
validation
def _build_pipeline_request(self, task_view): """Returns a Pipeline objects for the job.""" job_metadata = task_view.job_metadata job_params = task_view.job_params job_resources = task_view.job_resources task_metadata = task_view.task_descriptors[0].task_metadata task_params = task_view.task_descriptors[0].task_params task_resources = task_view.task_descriptors[0].task_resources script = task_view.job_metadata['script'] reserved_labels = google_base.build_pipeline_labels( job_metadata, task_metadata, task_id_pattern='task-%d') # Build the ephemeralPipeline for this job. # The ephemeralPipeline definition changes for each job because file # parameters localCopy.path changes based on the remote_uri. pipeline = _Pipelines.build_pipeline(
python
{ "resource": "" }
q257203
GoogleJobProvider.delete_jobs
validation
def delete_jobs(self, user_ids, job_ids, task_ids, labels, create_time_min=None, create_time_max=None): """Kills the operations associated with the specified job or job.task. Args: user_ids: List of user ids who "own" the job(s) to cancel. job_ids: List of job_ids to cancel. task_ids: List of task-ids to cancel. labels: List of LabelParam, each must match the job(s) to be canceled. create_time_min: a timezone-aware datetime value for the earliest create
python
{ "resource": "" }
q257204
GoogleOperation._operation_status_message
validation
def _operation_status_message(self):
  """Returns the most relevant status string and last updated date string.

  This string is meant for display only.

  Returns:
    A printable status string and date string.
  """
  metadata = self._op['metadata']
  if not self._op['done']:
    if 'events' in metadata and metadata['events']:
      # Get the last event
      last_event = metadata['events'][-1]
      msg = last_event['description']
      ds = last_event['startTime']
    else:
      msg = 'Pending'
python
{ "resource": "" }
q257205
GoogleOperation._get_operation_input_field_values
validation
def _get_operation_input_field_values(self, metadata, file_input): """Returns a dictionary of envs or file inputs for an operation. Args: metadata: operation metadata field file_input: True to return a dict of file inputs, False to return envs. Returns: A dictionary of input field name value pairs """ # To determine input parameter type, we iterate through the # pipeline inputParameters. # The values come from the pipelineArgs inputs. input_args = metadata['request']['ephemeralPipeline']['inputParameters']
python
{ "resource": "" }
q257206
_format_task_name
validation
def _format_task_name(job_id, task_id, task_attempt): """Create a task name from a job-id, task-id, and task-attempt. Task names are used internally by dsub as well as by the docker task runner. The name is formatted as "<job-id>.<task-id>[.task-attempt]". Task names follow formatting conventions allowing them to be safely used as a docker name.
python
{ "resource": "" }
q257207
_convert_suffix_to_docker_chars
validation
def _convert_suffix_to_docker_chars(suffix):
  """Rewrite string so that all characters are valid in a docker name suffix."""
  # Docker container names must match: [a-zA-Z0-9][a-zA-Z0-9_.-]
python
{ "resource": "" }
q257208
_task_sort_function
validation
def _task_sort_function(task): """Return a tuple for sorting 'most recent first'.""" return (task.get_field('create-time'),
python
{ "resource": "" }
q257209
LocalJobProvider._datetime_in_range
validation
def _datetime_in_range(self, dt, dt_min=None, dt_max=None): """Determine if the provided time is within the range, inclusive.""" # The pipelines API stores operation create-time with second granularity. # We mimic this behavior in the local provider by truncating to seconds. dt = dt.replace(microsecond=0) if dt_min: dt_min = dt_min.replace(microsecond=0) else:
python
{ "resource": "" }
q257210
LocalJobProvider._get_task_from_task_dir
validation
def _get_task_from_task_dir(self, job_id, user_id, task_id, task_attempt): """Return a Task object with this task's info.""" # We need to be very careful about how we read and interpret the contents # of the task directory. The directory could be changing because a new # task is being created. The directory could be changing because a task # is ending. # # If the meta.yaml does not exist, the task does not yet exist. # If the meta.yaml exists, it means the task is scheduled. It does not mean # it is yet running. # If the task.pid file exists, it means that the runner.sh was started. task_dir = self._task_directory(job_id, task_id, task_attempt) job_descriptor = self._read_task_metadata(task_dir) if not job_descriptor: return None # If we read up an old task, the user-id will not be in the job_descriptor. if not job_descriptor.job_metadata.get('user-id'): job_descriptor.job_metadata['user-id'] = user_id # Get the pid of the runner pid = -1 try: with open(os.path.join(task_dir, 'task.pid'), 'r') as f: pid = int(f.readline().strip()) except (IOError, OSError): pass # Get the script contents script = None script_name = job_descriptor.job_metadata.get('script-name') if script_name: script = self._read_script(task_dir, script_name) # Read the files written by the runner.sh. # For new tasks, these may not have been written yet.
python
{ "resource": "" }
q257211
LocalJobProvider._delocalize_logging_command
validation
def _delocalize_logging_command(self, logging_path, user_project): """Returns a command to delocalize logs. Args: logging_path: location of log files. user_project: name of the project to be billed for the request. Returns: eg. 'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12' """ # Get the logging prefix (everything up to ".log") logging_prefix = os.path.splitext(logging_path.uri)[0] # Set the provider-specific mkdir and file copy commands if logging_path.file_provider == job_model.P_LOCAL: mkdir_cmd = 'mkdir -p "%s"\n' % os.path.dirname(logging_prefix) cp_cmd = 'cp' elif logging_path.file_provider == job_model.P_GCS: mkdir_cmd = '' if user_project: cp_cmd = 'gsutil -u {} -mq cp'.format(user_project) else:
python
{ "resource": "" }
q257212
LocalJobProvider._task_directory
validation
def _task_directory(self, job_id, task_id, task_attempt): """The local dir for staging files for that particular task.""" dir_name = 'task' if task_id is None else str(task_id) if task_attempt:
python
{ "resource": "" }
q257213
LocalJobProvider._make_environment
validation
def _make_environment(self, inputs, outputs, mounts): """Return a dictionary of environment variables for the container.""" env = {} env.update(providers_util.get_file_environment_variables(inputs))
python
{ "resource": "" }
q257214
LocalJobProvider._localize_inputs_recursive_command
validation
def _localize_inputs_recursive_command(self, task_dir, inputs): """Returns a command that will stage recursive inputs.""" data_dir = os.path.join(task_dir, _DATA_SUBDIR) provider_commands = [ providers_util.build_recursive_localize_command(data_dir, inputs,
python
{ "resource": "" }
q257215
LocalJobProvider._get_input_target_path
validation
def _get_input_target_path(self, local_file_path): """Returns a directory or file path to be the target for "gsutil cp". If the filename contains a wildcard, then the target path must be a directory in order to ensure consistency whether the source pattern contains one or multiple files. Args: local_file_path: A full path terminating in a file or a
python
{ "resource": "" }
q257216
LocalJobProvider._localize_inputs_command
validation
def _localize_inputs_command(self, task_dir, inputs, user_project): """Returns a command that will stage inputs.""" commands = [] for i in inputs: if i.recursive or not i.value: continue source_file_path = i.uri local_file_path = task_dir + '/' + _DATA_SUBDIR + '/' + i.docker_path dest_file_path = self._get_input_target_path(local_file_path) commands.append('mkdir -p "%s"' % os.path.dirname(local_file_path)) if i.file_provider in [job_model.P_LOCAL, job_model.P_GCS]: # The semantics that we expect here are implemented consistently in # "gsutil cp", and are a bit different than "cp" when it comes to # wildcard handling, so use it for both local and GCS: #
python
{ "resource": "" }
q257217
LocalJobProvider._delocalize_outputs_commands
validation
def _delocalize_outputs_commands(self, task_dir, outputs, user_project): """Copy outputs from local disk to GCS.""" commands = [] for o in outputs: if o.recursive or not o.value: continue # The destination path is o.uri.path, which is the target directory # (rather than o.uri, which includes the filename or wildcard). dest_path = o.uri.path local_path = task_dir + '/' + _DATA_SUBDIR
python
{ "resource": "" }
q257218
get_dsub_version
validation
def get_dsub_version(): """Get the dsub version out of the _dsub_version.py source file. Setup.py should not import dsub version from dsub directly since ambiguity in import order could lead to an old version of dsub setting the version number. Parsing the file directly is simpler than using import tools (whose interface varies between python 2.7, 3.4, and 3.5). Returns: string of dsub version. Raises: ValueError: if the version is not found. """
python
{ "resource": "" }
q257219
GoogleV2EventMap.get_filtered_normalized_events
validation
def get_filtered_normalized_events(self): """Filter the granular v2 events down to events of interest. Filter through the large number of granular events returned by the pipelines API, and extract only those that are interesting to a user. This is implemented by filtering out events which are known to be uninteresting (i.e. the default actions run for every job) and by explicitly matching specific events which are interesting and mapping those to v1 style naming. Events which are not whitelisted or blacklisted will still be output, meaning any events which are added in the future won't be masked. We don't want to suppress display of events that we don't recognize. They may be important. Returns: A list of maps containing the normalized, filtered events. """ # Need the user-image to look for the right "pulling image" event user_image = google_v2_operations.get_action_image(self._op, _ACTION_USER_COMMAND) # Only create an "ok" event for operations with SUCCESS status. need_ok = google_v2_operations.is_success(self._op) # Events are keyed by name for easier deletion.
python
{ "resource": "" }
q257220
GoogleV2EventMap._map
validation
def _map(self, event): """Extract elements from an operation event and map to a named event.""" description = event.get('description', '') start_time = google_base.parse_rfc3339_utc_string(
python
{ "resource": "" }
q257221
GoogleV2JobProvider._get_logging_env
validation
def _get_logging_env(self, logging_uri, user_project): """Returns the environment for actions that copy logging files.""" if not logging_uri.endswith('.log'): raise ValueError('Logging URI must end in ".log": {}'.format(logging_uri))
python
{ "resource": "" }
q257222
GoogleV2JobProvider._get_prepare_env
validation
def _get_prepare_env(self, script, job_descriptor, inputs, outputs, mounts): """Return a dict with variables for the 'prepare' action.""" # Add the _SCRIPT_REPR with the repr(script) contents # Add the _META_YAML_REPR with the repr(meta) contents # Add variables for directories that need to be created, for example: # DIR_COUNT: 2 # DIR_0: /mnt/data/input/gs/bucket/path1/ # DIR_1: /mnt/data/output/gs/bucket/path2 # List the directories in sorted order so that they are created in that # order. This is primarily to ensure that permissions are set as we create # each directory. # For example: # mkdir -m 777 -p /root/first/second # mkdir -m 777 -p /root/first # *may* not actually set 777 on /root/first docker_paths = sorted([
python
{ "resource": "" }
q257223
GoogleV2JobProvider._get_localization_env
validation
def _get_localization_env(self, inputs, user_project):
  """Return a dict with variables for the 'localization' action."""
  # Add variables for paths that need to be localized, for example:
  # INPUT_COUNT: 1
  # INPUT_0: MY_INPUT_FILE
  # INPUT_RECURSIVE_0: 0
  # INPUT_SRC_0: gs://mybucket/mypath/myfile
  # INPUT_DST_0: /mnt/data/inputs/mybucket/mypath/myfile
  non_empty_inputs = [var for var in inputs if var.value]
  env = {'INPUT_COUNT': str(len(non_empty_inputs))}
  for idx, var in enumerate(non_empty_inputs):
    env['INPUT_{}'.format(idx)] = var.name
    env['INPUT_RECURSIVE_{}'.format(idx)]
python
{ "resource": "" }
q257224
GoogleV2JobProvider._get_delocalization_env
validation
def _get_delocalization_env(self, outputs, user_project): """Return a dict with variables for the 'delocalization' action.""" # Add variables for paths that need to be delocalized, for example: # OUTPUT_COUNT: 1 # OUTPUT_0: MY_OUTPUT_FILE # OUTPUT_RECURSIVE_0: 0 # OUTPUT_SRC_0: gs://mybucket/mypath/myfile # OUTPUT_DST_0: /mnt/data/outputs/mybucket/mypath/myfile non_empty_outputs = [var for var in outputs if var.value] env = {'OUTPUT_COUNT': str(len(non_empty_outputs))} for idx, var in enumerate(non_empty_outputs): env['OUTPUT_{}'.format(idx)] = var.name
python
{ "resource": "" }
q257225
GoogleV2JobProvider._build_user_environment
validation
def _build_user_environment(self, envs, inputs, outputs, mounts):
  """Returns a dictionary for the user container environment."""
  envs = {env.name: env.value for env in envs}
  envs.update(providers_util.get_file_environment_variables(inputs))
python
{ "resource": "" }
q257226
GoogleV2JobProvider._get_mount_actions
validation
def _get_mount_actions(self, mounts, mnt_datadisk): """Returns a list of two actions per gcs bucket to mount.""" actions_to_add = [] for mount in mounts: bucket = mount.value[len('gs://'):] mount_path = mount.docker_path actions_to_add.extend([ google_v2_pipelines.build_action( name='mount-{}'.format(bucket), flags=['ENABLE_FUSE', 'RUN_IN_BACKGROUND'], image_uri=_GCSFUSE_IMAGE, mounts=[mnt_datadisk], commands=[ '--implicit-dirs', '--foreground', '-o ro', bucket,
python
{ "resource": "" }
q257227
GoogleOperation._operation_status
validation
def _operation_status(self): """Returns the status of this operation. Raises: ValueError: if the operation status cannot be determined. Returns: A printable status string (RUNNING, SUCCESS, CANCELED or FAILURE). """ if not google_v2_operations.is_done(self._op):
python
{ "resource": "" }
q257228
GoogleOperation._operation_status_message
validation
def _operation_status_message(self): """Returns the most relevant status string and failed action. This string is meant for display only. Returns: A printable status string and name of failed action (if any). """ msg = None action = None if not google_v2_operations.is_done(self._op): last_event = google_v2_operations.get_last_event(self._op) if last_event: msg = last_event['description'] action_id = last_event.get('details', {}).get('actionId') if action_id: action = google_v2_operations.get_action_by_id(self._op, action_id) else: msg = 'Pending' else: failed_events = google_v2_operations.get_failed_events(self._op) if failed_events:
python
{ "resource": "" }
q257229
GoogleV2CustomMachine._validate_ram
validation
def _validate_ram(ram_in_mb): """Rounds ram up to the nearest multiple of _MEMORY_MULTIPLE."""
python
{ "resource": "" }
q257230
GoogleV2CustomMachine.build_machine_type
validation
def build_machine_type(cls, min_cores, min_ram):
  """Returns a custom machine type string."""
  min_cores = min_cores or job_model.DEFAULT_MIN_CORES
  min_ram = min_ram or job_model.DEFAULT_MIN_RAM

  # First, min_ram is given in GB. Convert to MB.
  min_ram *= GoogleV2CustomMachine._MB_PER_GB

  # Only machine types with 1 vCPU or an even number of vCPUs can be created.
  cores = cls._validate_cores(min_cores)

  # The total memory of the instance must be a multiple of 256 MB.
  ram = cls._validate_ram(min_ram)

  # Memory must be between 0.9 GB and 6.5 GB per vCPU.
  memory_to_cpu_ratio = ram / cores

  if memory_to_cpu_ratio < GoogleV2CustomMachine._MIN_MEMORY_PER_CPU:
    # If we're under the ratio, top up the memory.
    adjusted_ram = GoogleV2CustomMachine._MIN_MEMORY_PER_CPU * cores
    ram =
python
{ "resource": "" }
q257231
build_machine
validation
def build_machine(network=None, machine_type=None, preemptible=None, service_account=None, boot_disk_size_gb=None, disks=None, accelerators=None, labels=None, cpu_platform=None, nvidia_driver_version=None): """Build a VirtualMachine object for a Pipeline request. Args: network (dict): Network details for the pipeline to run in. machine_type (str): GCE Machine Type string for the pipeline. preemptible (bool): Use a preemptible VM for the job. service_account (dict): Service account configuration for the VM. boot_disk_size_gb (int): Boot disk size in GB. disks (list[dict]): List of disks to mount. accelerators (list[dict]): List of accelerators to attach to the VM. labels (dict[string, string]): Labels for the VM. cpu_platform (str): The CPU platform to
python
{ "resource": "" }
q257232
build_action
validation
def build_action(name=None, image_uri=None, commands=None, entrypoint=None, environment=None, pid_namespace=None, flags=None, port_mappings=None, mounts=None, labels=None): """Build an Action object for a Pipeline request. Args: name (str): An optional name for the container. image_uri (str): The URI to pull the container image from. commands (List[str]): commands and arguments to run inside the container. entrypoint (str): overrides the ENTRYPOINT specified in the container. environment (dict[str,str]): The environment to pass into the container. pid_namespace (str): The PID namespace to run the action inside. flags (str): Flags that control the execution of this action. port_mappings (dict[int, int]): A map of container to host port mappings for this container.
python
{ "resource": "" }
q257233
StubJobProvider.lookup_job_tasks
validation
def lookup_job_tasks(self, statuses, user_ids=None, job_ids=None, job_names=None, task_ids=None, task_attempts=None, labels=None, create_time_min=None, create_time_max=None, max_tasks=0): """Return a list of operations. See base.py for additional detail.""" statuses = None if statuses == {'*'} else statuses user_ids = None if user_ids == {'*'} else user_ids job_ids = None if job_ids == {'*'} else job_ids job_names = None if job_names == {'*'} else job_names task_ids = None if task_ids == {'*'} else task_ids task_attempts = None if task_attempts == {'*'} else task_attempts if labels or create_time_min or create_time_max: raise NotImplementedError( 'Lookup by labels and create_time
python
{ "resource": "" }
q257234
get_provider
validation
def get_provider(args, resources): """Returns a provider for job submission requests.""" provider = getattr(args, 'provider', 'google') if provider == 'google': return google.GoogleJobProvider( getattr(args, 'verbose', False), getattr(args, 'dry_run', False), args.project) elif provider == 'google-v2': return google_v2.GoogleV2JobProvider( getattr(args, 'verbose', False), getattr(args, 'dry_run', False),
python
{ "resource": "" }
q257235
parse_args
validation
def parse_args(parser, provider_required_args, argv): """Add provider required arguments epilog message, parse, and validate.""" # Add the provider required arguments epilog message epilog = 'Provider-required arguments:\n' for provider in provider_required_args: epilog += ' %s: %s\n' % (provider, provider_required_args[provider]) parser.epilog = epilog # Parse arguments args = parser.parse_args(argv) # For the selected
python
{ "resource": "" }
q257236
get_dstat_provider_args
validation
def get_dstat_provider_args(provider, project):
  """A string with the arguments to point dstat to the same provider+project."""
  provider_name = get_provider_name(provider)

  args = []
  if provider_name == 'google':
    args.append('--project %s' % project)
  elif provider_name == 'google-v2':
    args.append('--project %s' % project)
  elif provider_name == 'local':
    pass
  elif provider_name == 'test-fails':
    pass
  else:
python
{ "resource": "" }
q257237
_format_task_uri
validation
def _format_task_uri(fmt, job_metadata, task_metadata): """Returns a URI with placeholders replaced by metadata values.""" values = { 'job-id': None, 'task-id': 'task', 'job-name': None, 'user-id': None, 'task-attempt': None }
python
{ "resource": "" }
q257238
format_logging_uri
validation
def format_logging_uri(uri, job_metadata, task_metadata):
  """Inserts task metadata into the logging URI.

  The core behavior is inspired by the Google Pipelines API:
  (1) If the uri ends in ".log", then that is the logging path.
  (2) Otherwise, the uri is treated as a "directory" for logs and a filename
      needs to be automatically generated.

  For (1), if the job is a --tasks job, then the {task-id} is inserted
  before ".log".

  For (2), the file name generated is {job-id}, or for --tasks jobs, it is
  {job-id}.{task-id}.

  In both cases, .{task-attempt} is inserted before .log for --retries jobs.

  In addition, full task metadata substitution is supported. The URI may
  include substitution strings such as "{job-id}", "{task-id}", "{job-name}",
  "{user-id}", and "{task-attempt}".

  Args:
    uri: User-specified logging URI which may contain substitution fields.
    job_metadata: job-global metadata.
    task_metadata: task-specific metadata.

  Returns:
    The logging_uri formatted as described above.
  """
  # If the user specifies any formatting (with curly braces), then use
python
{ "resource": "" }
q257239
_google_v2_parse_arguments
validation
def _google_v2_parse_arguments(args):
  """Validates google-v2 arguments."""
  if (args.zones and args.regions) or (not args.zones and not args.regions):
    raise ValueError('Exactly
python
{ "resource": "" }
q257240
_get_job_resources
validation
def _get_job_resources(args): """Extract job-global resources requirements from input args. Args: args: parsed command-line arguments Returns: Resources object containing the requested resources for the job """ logging = param_util.build_logging_param( args.logging) if args.logging else None timeout = param_util.timeout_in_seconds(args.timeout) log_interval = param_util.log_interval_in_seconds(args.log_interval) return job_model.Resources( min_cores=args.min_cores, min_ram=args.min_ram, machine_type=args.machine_type, disk_size=args.disk_size, disk_type=args.disk_type, boot_disk_size=args.boot_disk_size, preemptible=args.preemptible, image=args.image, regions=args.regions, zones=args.zones, logging=logging, logging_path=None,
python
{ "resource": "" }
q257241
_get_job_metadata
validation
def _get_job_metadata(provider, user_id, job_name, script, task_ids, user_project, unique_job_id): """Allow provider to extract job-specific metadata from command-line args. Args: provider: job service provider user_id: user submitting the job job_name: name for the job script: the script to run task_ids: a set of the task-ids for all tasks in the job user_project: name of the project to be billed for the request unique_job_id: generate a
python
{ "resource": "" }
q257242
_resolve_task_logging
validation
def _resolve_task_logging(job_metadata, job_resources, task_descriptors): """Resolve the logging path from job and task properties. Args: job_metadata: Job metadata, such as job-id, job-name, and user-id. job_resources: Resources specified such as ram, cpu, and logging path. task_descriptors: Task metadata, parameters, and resources. Resolve the logging path, which may have substitution parameters such as job-id, task-id, user-id, and job-name.
python
{ "resource": "" }
q257243
_wait_after
validation
def _wait_after(provider, job_ids, poll_interval, stop_on_failure): """Print status info as we wait for those jobs. Blocks until either all of the listed jobs succeed, or one of them fails. Args: provider: job service provider job_ids: a set of job IDs (string) to wait for poll_interval: integer seconds to wait between iterations stop_on_failure: whether to stop waiting if one of the tasks fails. Returns: Empty list if there was no error, a list of error messages from the failed tasks otherwise. """ # Each time through the loop, the job_set is re-set to the jobs remaining to # check. Jobs are removed from the list when they complete. # # We exit the loop when: # * No jobs remain are running, OR # * stop_on_failure is TRUE AND at least one job returned an error # remove NO_JOB job_ids_to_check = {j for j in job_ids if j != dsub_util.NO_JOB} error_messages = [] while job_ids_to_check and (not error_messages or not stop_on_failure): print('Waiting for: %s.' % (', '.join(job_ids_to_check)))
python
{ "resource": "" }
q257244
_wait_and_retry
validation
def _wait_and_retry(provider, job_id, poll_interval, retries, job_descriptor): """Wait for job and retry any tasks that fail. Stops retrying an individual task when: it succeeds, is canceled, or has been retried "retries" times. This function exits when there are no tasks running and there are no tasks eligible to be retried. Args: provider: job service provider job_id: a single job ID (string) to wait for poll_interval: integer seconds to wait between iterations retries: number of retries job_descriptor: job descriptor used to originally submit job Returns: Empty list if there was no error, a list containing an error message from a failed task otherwise. """ while True: tasks = provider.lookup_job_tasks({'*'}, job_ids=[job_id]) running_tasks = set() completed_tasks = set() canceled_tasks = set() fully_failed_tasks = set() task_fail_count = dict() # This is an arbitrary task that is either fully failed or canceled (with # preference for the former). message_task = None task_dict = dict() for t in tasks: task_id = job_model.numeric_task_id(t.get_field('task-id')) task_dict[task_id] = t status = t.get_field('task-status') if status == 'FAILURE': # Could compute this from task-attempt as well. task_fail_count[task_id] = task_fail_count.get(task_id, 0) + 1 if task_fail_count[task_id] > retries: fully_failed_tasks.add(task_id) message_task = t elif status == 'CANCELED': canceled_tasks.add(task_id) if not message_task: message_task = t elif status
python
{ "resource": "" }
q257245
_dominant_task_for_jobs
validation
def _dominant_task_for_jobs(tasks): """A list with, for each job, its dominant task. The dominant task is the one that exemplifies its job's status. It is either: - the first (FAILURE or CANCELED) task, or if none - the first RUNNING task, or if none - the first SUCCESS task. Args: tasks: a list of tasks to consider Returns: A list with,
python
{ "resource": "" }
q257246
_group_tasks_by_jobid
validation
def _group_tasks_by_jobid(tasks): """A defaultdict with, for each job, a list of its tasks.""" ret = collections.defaultdict(list)
python
{ "resource": "" }
q257247
_wait_for_any_job
validation
def _wait_for_any_job(provider, job_ids, poll_interval):
  """Waits until any of the listed jobs is not running.

  In particular, if any of the jobs sees one of its tasks fail, we count the
  whole job as failing (but do not terminate the remaining tasks ourselves).

  Args:
    provider: job service provider
    job_ids: a list of job IDs (string) to wait for
    poll_interval: integer seconds to wait between iterations

  Returns:
    A set of the job IDs that still have at least one running task.
  """
  if not job_ids:
    return
  while True:
    tasks = provider.lookup_job_tasks({'*'}, job_ids=job_ids)
    running_jobs = set()
    failed_jobs = set()
    for t in tasks:
      status = t.get_field('task-status')
python
{ "resource": "" }
q257248
_validate_job_and_task_arguments
validation
def _validate_job_and_task_arguments(job_params, task_descriptors): """Validates that job and task argument names do not overlap.""" if not task_descriptors: return task_params = task_descriptors[0].task_params # The use case for specifying a label or env/input/output parameter on # the command-line and also including it in the --tasks file is not obvious. # Should the command-line override the --tasks file? Why? # Until this use is articulated, generate an error on overlapping names. # Check labels from_jobs = {label.name for label in job_params['labels']} from_tasks = {label.name for label in task_params['labels']} intersect = from_jobs & from_tasks if intersect: raise ValueError( 'Names for labels on the command-line and in the --tasks file must not ' 'be repeated: {}'.format(','.join(intersect))) # Check envs, inputs, and outputs, all of which must
python
{ "resource": "" }
q257249
_name_for_command
validation
def _name_for_command(command):
  r"""Craft a simple command name from the command.

  The best command strings for this are going to be those where a simple
  command was given; we will use the command to derive the name. We won't
  always be able to figure something out and the caller should just specify
  a "--name" on the command-line.

  For example, for a command like "export VAR=val\necho ${VAR}", this function
  would return "export".

  If the command starts with a space or a comment, then we'll skip to the
  first code we can find. If we find nothing, just return "command".

  >>> _name_for_command('samtools index "${BAM}"')
  'samtools'
  >>>
python
{ "resource": "" }
q257250
_local_uri_rewriter
validation
def _local_uri_rewriter(raw_uri): """Rewrite local file URIs as required by the rewrite_uris method. Local file paths, unlike GCS paths, may have their raw URI simplified by os.path.normpath which collapses extraneous indirect characters. >>> _local_uri_rewriter('/tmp/a_path/../B_PATH/file.txt') ('/tmp/B_PATH/file.txt', 'file/tmp/B_PATH/file.txt') >>> _local_uri_rewriter('/myhome/./mydir/') ('/myhome/mydir/', 'file/myhome/mydir/') The local path rewriter will also work to preserve relative paths even when creating the docker path. This prevents leaking of information on the invoker's system to the remote system. Doing this requires a number of path substitutions denoted with the _<rewrite>_ convention. >>> _local_uri_rewriter('./../upper_dir/')[1] 'file/_dotdot_/upper_dir/' >>> _local_uri_rewriter('~/localdata/*.bam')[1] 'file/_home_/localdata/*.bam' Args: raw_uri: (str) the raw file or directory path. Returns: normalized: a simplified and/or expanded version of the uri. docker_path: the uri rewritten in the format required for mounting inside a docker worker. """ # The path is split into components so that the filename is not rewritten. raw_path, filename = os.path.split(raw_uri) # Generate the local path that can be resolved by filesystem operations, # this removes special shell characters, condenses indirects and replaces # any unnecessary prefix. prefix_replacements = [('file:///', '/'), ('~/', os.getenv('HOME')), ('./',
python
{ "resource": "" }
q257251
_get_filtered_mounts
validation
def _get_filtered_mounts(mounts, mount_param_type): """Helper function to return an appropriate set of mount parameters.""" return
python
{ "resource": "" }
q257252
build_logging_param
validation
def build_logging_param(logging_uri, util_class=OutputFileParamUtil): """Convenience function simplifies construction of the logging uri.""" if not logging_uri: return job_model.LoggingParam(None, None) recursive = not logging_uri.endswith('.log') oututil = util_class('') _, uri, provider =
python
{ "resource": "" }
q257253
split_pair
validation
def split_pair(pair_string, separator, nullable_idx=1): """Split a string into a pair, which can have one empty value. Args: pair_string: The string to be split. separator: The separator to be used for splitting. nullable_idx: The location to be set to null if the separator is not in the
python
{ "resource": "" }
q257254
parse_tasks_file_header
validation
def parse_tasks_file_header(header, input_file_param_util, output_file_param_util): """Parse the header from the tasks file into env, input, output definitions. Elements are formatted similar to their equivalent command-line arguments, but with associated values coming from the data rows. Environment variables columns are headered as "--env <name>" Inputs columns are headered as "--input <name>" with the name optional. Outputs columns are headered as "--output <name>" with the name optional. For historical reasons, bareword column headers (such as "JOB_ID") are equivalent to "--env var_name". Args: header: Array of header fields input_file_param_util: Utility for producing InputFileParam objects. output_file_param_util: Utility for producing OutputFileParam objects. Returns: job_params: A list of EnvParams and FileParams for the environment variables, LabelParams, input file parameters, and output file parameters. Raises: ValueError: If a header contains a ":" and the prefix is not supported. """ job_params = [] for col in header: # Reserve the "-" and "--" namespace. # If the column has no leading "-", treat it as an environment variable col_type = '--env' col_value = col if col.startswith('-'): col_type, col_value = split_pair(col, ' ', 1) if col_type == '--env': job_params.append(job_model.EnvParam(col_value)) elif col_type == '--label':
python
{ "resource": "" }
q257255
tasks_file_to_task_descriptors
validation
def tasks_file_to_task_descriptors(tasks, retries, input_file_param_util, output_file_param_util): """Parses task parameters from a TSV. Args: tasks: Dict containing the path to a TSV file and task numbers to run variables, input, and output parameters as column headings. Subsequent lines specify parameter values, one row per job. retries: Number of retries allowed. input_file_param_util: Utility for producing InputFileParam objects. output_file_param_util: Utility for producing OutputFileParam objects. Returns: task_descriptors: an array of records, each containing the task-id, task-attempt, 'envs', 'inputs', 'outputs', 'labels' that defines the set of parameters for each task of the job. Raises: ValueError: If no job records were provided """ task_descriptors = [] path = tasks['path'] task_min = tasks.get('min') task_max = tasks.get('max') # Load the file and set up a Reader that tokenizes the fields param_file = dsub_util.load_file(path) reader = csv.reader(param_file, delimiter='\t') # Read the first line and extract the parameters header = six.advance_iterator(reader) job_params = parse_tasks_file_header(header, input_file_param_util,
python
{ "resource": "" }
q257256
parse_pair_args
validation
def parse_pair_args(labels, argclass): """Parse flags of key=value pairs and return a list of argclass. For pair variables, we need to: * split the input into name=value pairs (value optional) * Create the EnvParam object Args: labels: list of 'key' or 'key=value' strings. argclass: Container class for args, must instantiate with argclass(k, v). Returns:
python
{ "resource": "" }
q257257
args_to_job_params
validation
def args_to_job_params(envs, labels, inputs, inputs_recursive, outputs, outputs_recursive, mounts, input_file_param_util, output_file_param_util, mount_param_util): """Parse env, input, and output parameters into a job parameters and data. Passing arguments on the command-line allows for launching a single job. The env, input, and output arguments encode both the definition of the job as well as the single job's values. Env arguments are simple name=value pairs. Input and output file arguments can contain name=value pairs or just values. Either of the following is valid: uri myfile=uri Args: envs: list of environment variable job parameters labels: list of labels to attach to the tasks inputs: list of file input parameters inputs_recursive: list of recursive directory input parameters outputs: list of file output parameters outputs_recursive: list of recursive directory output parameters mounts: list of gcs buckets to mount input_file_param_util: Utility for producing InputFileParam objects. output_file_param_util: Utility for producing OutputFileParam objects. mount_param_util: Utility for producing MountParam objects. Returns: job_params: a dictionary of 'envs', 'inputs', and 'outputs' that defines the set of parameters and data for a job. """ # Parse environmental variables and labels. env_data = parse_pair_args(envs, job_model.EnvParam) label_data = parse_pair_args(labels, job_model.LabelParam) # For input files, we need to: # * split the input into name=uri pairs (name optional) # * get the environmental variable name, or automatically set if null. # * create the input file param input_data = set() for (recursive, args) in ((False, inputs), (True, inputs_recursive)): for arg in args: name, value = split_pair(arg, '=', nullable_idx=0) name = input_file_param_util.get_variable_name(name) input_data.add(input_file_param_util.make_param(name, value, recursive))
python
{ "resource": "" }
q257258
validate_submit_args_or_fail
validation
def validate_submit_args_or_fail(job_descriptor, provider_name, input_providers, output_providers, logging_providers): """Validate that arguments passed to submit_job have valid file providers. This utility function takes resources and task data args from `submit_job` in the base provider. This function will fail with a value error if any of the parameters are not valid. See the following example; >>> job_resources = type('', (object,), ... {"logging": job_model.LoggingParam('gs://logtemp', job_model.P_GCS)})() >>> job_params={'inputs': set(), 'outputs': set(), 'mounts': set()} >>> task_descriptors = [ ... job_model.TaskDescriptor(None, { ... 'inputs': { ... job_model.FileParam('IN', uri='gs://in/*', ... file_provider=job_model.P_GCS)}, ... 'outputs': set()}, None), ... job_model.TaskDescriptor(None, { ... 'inputs': set(), ... 'outputs': { ... job_model.FileParam('OUT', uri='gs://out/*', ... file_provider=job_model.P_GCS)}}, None)] ... >>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params, ... job_resources, task_descriptors), ... provider_name='MYPROVIDER', ... input_providers=[job_model.P_GCS], ... output_providers=[job_model.P_GCS], ... logging_providers=[job_model.P_GCS]) ... >>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params, ... job_resources, task_descriptors), ... provider_name='MYPROVIDER', ... input_providers=[job_model.P_GCS], ... output_providers=[job_model.P_LOCAL], ... logging_providers=[job_model.P_GCS]) Traceback (most recent call last): ... ValueError: Unsupported output path (gs://out/*) for provider 'MYPROVIDER'. Args: job_descriptor: instance of job_model.JobDescriptor. provider_name: (str) the name of the execution provider.
python
{ "resource": "" }
q257259
_interval_to_seconds
validation
def _interval_to_seconds(interval, valid_units='smhdw'):
  """Convert the timeout duration to seconds.

  The value must be of the form "<integer><unit>" where supported units are
  s, m, h, d, w (seconds, minutes, hours, days, weeks).

  Args:
    interval: A "<integer><unit>" string.
    valid_units: A list of supported units.

  Returns:
    A string of the form "<number>s", or None if the interval is empty.
  """
  if not interval:
    return None
  try:
    last_char = interval[-1]

    if last_char == 's' and 's' in valid_units:
      return str(float(interval[:-1])) + 's'
    elif last_char == 'm' and 'm' in valid_units:
      return str(float(interval[:-1]) * 60) + 's'
    elif last_char == 'h' and 'h' in valid_units:
      return str(float(interval[:-1]) * 60 * 60) + 's'
    elif last_char ==
python
{ "resource": "" }
q257260
FileParamUtil.get_variable_name
validation
def get_variable_name(self, name): """Produce a default variable name if none is specified."""
python
{ "resource": "" }
q257261
FileParamUtil.rewrite_uris
validation
def rewrite_uris(self, raw_uri, file_provider): """Accept a raw uri and return rewritten versions. This function returns a normalized URI and a docker path. The normalized URI may have minor alterations meant to disambiguate and prepare for use by shell utilities that may require a specific format. The docker rewriter makes substantial modifications to the raw URI when constructing a docker path, but modifications must follow these rules: 1) System specific characters are not allowed (ex. indirect paths). 2) The path, if it is a directory, must end in a forward slash. 3) The path will begin with the value set in self._relative_path. 4) The path will have an additional prefix (after self._relative_path) set by the file provider-specific rewriter. Rewrite output for the docker path: >>> out_util = FileParamUtil('AUTO_', 'output') >>> out_util.rewrite_uris('gs://mybucket/myfile.txt', job_model.P_GCS)[1] 'output/gs/mybucket/myfile.txt'
python
{ "resource": "" }
q257262
FileParamUtil.parse_file_provider
validation
def parse_file_provider(uri): """Find the file provider for a URI.""" providers = {'gs': job_model.P_GCS, 'file': job_model.P_LOCAL} # URI scheme detector uses a range up to 30 since none of the IANA # registered schemes are longer than this. provider_found = re.match(r'^([A-Za-z][A-Za-z0-9+.-]{0,29})://', uri) if provider_found: prefix = provider_found.group(1).lower() else: # If no provider is specified in the URI,
python
{ "resource": "" }
q257263
FileParamUtil._validate_paths_or_fail
validation
def _validate_paths_or_fail(uri, recursive):
  """Do basic validation of the uri, return the path and filename."""
  path, filename = os.path.split(uri)

  # dsub could support character ranges ([0-9]) with some more work, but for
  # now we assume that basic asterisk wildcards are sufficient. Reject any URI
  # that includes square brackets or question marks, since we know that
  # if they actually worked, it would be accidental.
  if '[' in uri or ']' in uri:
    raise ValueError(
        'Square bracket (character ranges) are not supported: %s' % uri)
  if '?' in uri:
    raise ValueError('Question mark wildcards are not supported: %s' % uri)

  # Only support file URIs and *filename* wildcards
  # Wildcards at the directory level or "**" syntax would require better
  # support from the Pipelines API *or* doing expansion here and
  # (potentially) producing a series of FileParams, instead of one.
  if '*' in
python
{ "resource": "" }
q257264
FileParamUtil.parse_uri
validation
def parse_uri(self, raw_uri, recursive): """Return a valid docker_path, uri, and file provider from a flag value.""" # Assume recursive URIs are directory paths. if recursive: raw_uri = directory_fmt(raw_uri) # Get the file provider, validate the raw URI, and rewrite the path # component of the URI for docker and remote. file_provider = self.parse_file_provider(raw_uri) self._validate_paths_or_fail(raw_uri, recursive)
python
{ "resource": "" }
q257265
MountParamUtil._parse_image_uri
validation
def _parse_image_uri(self, raw_uri): """Return a valid docker_path from a Google Persistent Disk url.""" # The string replace is so we don't have colons and double slashes in the # mount path. The idea is the resulting mount path would look like:
python
{ "resource": "" }
q257266
MountParamUtil._parse_local_mount_uri
validation
def _parse_local_mount_uri(self, raw_uri): """Return a valid docker_path for a local file path.""" raw_uri = directory_fmt(raw_uri) _, docker_path = _local_uri_rewriter(raw_uri)
python
{ "resource": "" }
q257267
MountParamUtil._parse_gcs_uri
validation
def _parse_gcs_uri(self, raw_uri): """Return a valid docker_path for a GCS bucket.""" # Assume URI is a directory path.
python
{ "resource": "" }
q257268
MountParamUtil.make_param
validation
def make_param(self, name, raw_uri, disk_size): """Return a MountParam given a GCS bucket, disk image or local path.""" if raw_uri.startswith('https://www.googleapis.com/compute'): # Full Image URI should look something like: # https://www.googleapis.com/compute/v1/projects/<project>/global/images/ # But don't validate further, should the form of a valid image URI # change (v1->v2, for example) docker_path = self._parse_image_uri(raw_uri) return job_model.PersistentDiskMountParam( name, raw_uri, docker_path, disk_size, disk_type=None)
python
{ "resource": "" }
q257269
validate_param_name
validation
def validate_param_name(name, param_type): """Validate that the name follows posix conventions for env variables.""" # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_235 # # 3.235 Name # In the shell command language, a word
python
{ "resource": "" }
q257270
validate_bucket_name
validation
def validate_bucket_name(bucket):
  """Validate that the name is a valid GCS bucket."""
  if not bucket.startswith('gs://'):
    raise ValueError(
        'Invalid bucket path "%s". Must start with "gs://".' % bucket)
python
{ "resource": "" }
q257271
convert_to_label_chars
validation
def convert_to_label_chars(s):
  """Turn the specified name and value into a valid Google label."""
  # We want the results to be user-friendly, not just functional.
  # So we can't base-64 encode it.
  # * If upper-case: lower-case it
  # * If the char is not a standard letter or digit, make it a dash
  # March 2019 note: underscores are now allowed in labels.
  # However, removing the conversion of underscores to dashes here would
  # create inconsistencies between old jobs and new jobs.
  # With existing code, $USER "jane_doe" has a user-id label of "jane-doe".
  # If we
python
{ "resource": "" }
q257272
ensure_task_params_are_complete
validation
def ensure_task_params_are_complete(task_descriptors): """For each task, ensure that each task param entry is not None.""" for task_desc in task_descriptors: for param in [ 'labels', 'envs',
python
{ "resource": "" }
q257273
_remove_empty_items
validation
def _remove_empty_items(d, required):
  """Return a new dict with any empty items removed.

  Note that this is not a deep check. If d contains a dictionary which
  itself contains empty items, those are never checked.

  This method exists to make to_serializable() functions cleaner.
  We could revisit this some day, but for now, the serialized objects are
  stripped of empty values to keep the output YAML more compact.

  Args:
    d: a dictionary
    required: list of required keys
python
{ "resource": "" }
q257274
task_view_generator
validation
def task_view_generator(job_descriptor): """Generator that yields a task-specific view of the job. This generator exists to make it easy for callers to iterate over the tasks in a JobDescriptor. Each pass yields a new JobDescriptor with a single task. Args: job_descriptor: A JobDescriptor with 1 or more tasks. Yields: A JobDescriptor with a single task. """
python
{ "resource": "" }
q257275
numeric_task_id
validation
def numeric_task_id(task_id):
  """Converts a task-id to the numeric task-id.

  Args:
    task_id: task-id in either task-n or n format

  Returns:
    n
  """
  # This function exists to support the legacy "task-id" format in the "google"
  # provider. Google labels originally could not be numeric. When the google
python
{ "resource": "" }
q257276
LabelParam._validate_label
validation
def _validate_label(cls, name, value): """Raise ValueError if the label is invalid.""" # Rules for labels are described in: # https://cloud.google.com/compute/docs/labeling-resources#restrictions # * Keys and values cannot be longer than 63 characters each. # * Keys and values can only contain lowercase letters, numeric characters, #
python
{ "resource": "" }
q257277
JobDescriptor._from_yaml_v0
validation
def _from_yaml_v0(cls, job): """Populate a JobDescriptor from the local provider's original meta.yaml. The local job provider had the first incarnation of a YAML file for each task. That idea was extended here in the JobDescriptor and the local provider adopted the JobDescriptor.to_yaml() call to write its meta.yaml. The JobDescriptor.from_yaml() detects if it receives a local provider's "v0" meta.yaml and calls this function. Args: job: an object produced from decoding meta.yaml. Returns: A JobDescriptor populated as best we can from the old meta.yaml. """ # The v0 meta.yaml only contained: # create-time, job-id, job-name, logging, task-id # labels, envs, inputs, outputs # It did NOT contain user-id. # dsub-version might be there as a label. job_metadata = {} for key in ['job-id', 'job-name', 'create-time']: job_metadata[key] = job.get(key) # Make sure that create-time string is turned into a datetime job_metadata['create-time'] = dsub_util.replace_timezone( datetime.datetime.strptime(job['create-time'], '%Y-%m-%d %H:%M:%S.%f'), tzlocal()) # The v0 meta.yaml contained a "logging" field which was the task-specific # logging path. It did not include the actual "--logging" value the user # specified. job_resources = Resources() # The v0 meta.yaml represented a single task. # It did not distinguish whether params were job params or task params. # We will treat them as either all job params or all task params, based on # whether the task-id is empty or an integer value. # # We also cannot distinguish whether inputs/outputs were recursive or not. # Just treat them all as non-recursive. params = {} #
python
{ "resource": "" }
q257278
JobDescriptor.from_yaml
validation
def from_yaml(cls, yaml_string): """Populate and return a JobDescriptor from a YAML string.""" try: job = yaml.full_load(yaml_string) except AttributeError: # For installations that cannot update their PyYAML version job = yaml.load(yaml_string) # If the YAML does not contain a top-level dsub version, then assume that # the string is coming from the local provider, reading an old version of # its meta.yaml. dsub_version = job.get('dsub-version') if not dsub_version: return cls._from_yaml_v0(job) job_metadata = {} for key in [ 'job-id', 'job-name', 'task-ids', 'user-id', 'dsub-version', 'user-project', 'script-name' ]: if job.get(key) is not None: job_metadata[key] = job.get(key) # Make sure that create-time string is turned into a datetime job_metadata['create-time'] = dsub_util.replace_timezone( job.get('create-time'), pytz.utc) job_resources = Resources(logging=job.get('logging')) job_params = {} job_params['labels'] = cls._label_params_from_dict(job.get('labels', {})) job_params['envs'] = cls._env_params_from_dict(job.get('envs', {})) job_params['inputs'] = cls._input_file_params_from_dict( job.get('inputs', {}), False) job_params['input-recursives'] = cls._input_file_params_from_dict( job.get('input-recursives', {}), True) job_params['outputs'] = cls._output_file_params_from_dict( job.get('outputs', {}), False) job_params['output-recursives'] = cls._output_file_params_from_dict( job.get('output-recursives', {}), True) job_params['mounts'] = cls._mount_params_from_dict(job.get('mounts', {})) task_descriptors = [] for task in job.get('tasks', []): task_metadata = {'task-id': task.get('task-id')} # Old instances of the meta.yaml do not have a task create time. create_time = task.get('create-time') if create_time: task_metadata['create-time']
python
{ "resource": "" }
q257279
JobDescriptor.find_task_descriptor
validation
def find_task_descriptor(self, task_id): """Returns the task_descriptor corresponding to task_id.""" # It is not guaranteed that the index will be task_id - 1 when --tasks is # used with a min/max range.
python
{ "resource": "" }
q257280
get_file_environment_variables
validation
def get_file_environment_variables(file_params): """Return a dictionary of environment variables for the user container.""" env = {} for param in file_params: # We have no cases where the environment variable provided to user # scripts have a trailing slash, so be sure to always strip it. # The case that this is specifically handling is --input-recursive and
python
{ "resource": "" }
q257281
get_job_and_task_param
validation
def get_job_and_task_param(job_params, task_params, field): """Returns a dict combining the field for job and task params."""
python
{ "resource": "" }
q257282
_emit_search_criteria
validation
def _emit_search_criteria(user_ids, job_ids, task_ids, labels): """Print the filters used to delete tasks. Use raw flags as arguments.""" print('Delete running jobs:') print(' user:') print(' %s\n' % user_ids) print(' job-id:') print(' %s\n' % job_ids) if task_ids: print(' task-id:') print('
python
{ "resource": "" }
q257283
ddel_tasks
validation
def ddel_tasks(provider, user_ids=None, job_ids=None, task_ids=None, labels=None, create_time_min=None, create_time_max=None): """Kill jobs or job tasks. This function separates ddel logic from flag parsing and user output. Users of ddel who intend to access the data programmatically should use this. Args: provider: an instantiated dsub provider. user_ids: a set of user ids who "own" the job(s) to delete. job_ids: a set of job ids to delete. task_ids: a set of task ids to delete. labels: a set of LabelParam, each must match the job(s) to be cancelled. create_time_min: a timezone-aware datetime value for the earliest create
python
{ "resource": "" }
q257284
get_action_by_id
validation
def get_action_by_id(op, action_id):
  """Return the action with the given action_id from the operation's actions."""
  actions = get_actions(op)
  if actions
python
{ "resource": "" }
q257285
_get_action_by_name
validation
def _get_action_by_name(op, name): """Return the value for the specified action.""" actions = get_actions(op) for action
python
{ "resource": "" }
q257286
get_action_environment
validation
def get_action_environment(op, name): """Return the environment for the operation.""" action
python
{ "resource": "" }
q257287
get_action_image
validation
def get_action_image(op, name): """Return the image for the operation.""" action =
python
{ "resource": "" }
q257288
get_event_of_type
validation
def get_event_of_type(op, event_type): """Return all events of a particular type.""" events = get_events(op) if not events: return None return [e
python
{ "resource": "" }
q257289
get_last_update
validation
def get_last_update(op): """Return the most recent timestamp in the operation.""" last_update = get_end_time(op) if not last_update: last_event = get_last_event(op) if last_event:
python
{ "resource": "" }
q257290
_prepare_summary_table
validation
def _prepare_summary_table(rows): """Create a new table that is a summary of the input rows. All with the same (job-name or job-id, status) go together. Args: rows: the input rows, a list of dictionaries. Returns: A new row set of summary information. """ if not rows: return [] # We either group on the job-name (if present) or fall back to the job-id key_field = 'job-name' if key_field not in rows[0]: key_field = 'job-id' # Group each of the rows based on (job-name or job-id, status) grouped = collections.defaultdict(lambda: collections.defaultdict(lambda: [])) for row in rows: grouped[row.get(key_field, '')][row.get('status', '')] += [row] # Now that we have the rows grouped, create a summary table. # Use the original table as the driver in order to preserve the order. new_rows = [] for job_key in sorted(grouped.keys()): group = grouped.get(job_key, None) canonical_status = ['RUNNING', 'SUCCESS', 'FAILURE',
python
{ "resource": "" }
q257291
lookup_job_tasks
validation
def lookup_job_tasks(provider, statuses, user_ids=None, job_ids=None, job_names=None, task_ids=None, task_attempts=None, labels=None, create_time_min=None, create_time_max=None, max_tasks=0, page_size=0, summary_output=False): """Generate formatted jobs individually, in order of create-time. Args: provider: an instantiated dsub provider. statuses: a set of status strings that eligible jobs may match. user_ids: a set of user strings that eligible jobs may match. job_ids: a set of job-id strings eligible jobs may match. job_names: a set of job-name strings eligible jobs may match. task_ids: a set of task-id strings eligible tasks may match. task_attempts: a set of task-attempt strings eligible tasks may match. labels: set of LabelParam that all tasks must match. create_time_min: a timezone-aware datetime value for the earliest create time of a task, inclusive. create_time_max: a timezone-aware datetime value for the most recent create time of a task, inclusive. max_tasks: (int) maximum number of tasks to return per dstat job lookup. page_size: the page
python
{ "resource": "" }
q257292
OutputFormatter.prepare_output
validation
def prepare_output(self, row): """Convert types of task fields.""" date_fields = ['last-update', 'create-time', 'start-time', 'end-time'] int_fields = ['task-attempt']
python
{ "resource": "" }
q257293
TextOutput.trim_display_field
validation
def trim_display_field(self, value, max_length): """Return a value for display; if longer than max length, use ellipsis.""" if not value: return ''
python
{ "resource": "" }
q257294
TextOutput.format_pairs
validation
def format_pairs(self, values): """Returns a string of comma-delimited key=value pairs."""
python
{ "resource": "" }
q257295
YamlOutput.string_presenter
validation
def string_presenter(self, dumper, data): """Presenter to force yaml.dump to use multi-line string style.""" if '\n' in data:
python
{ "resource": "" }
q257296
get_zones
validation
def get_zones(input_list):
  """Returns a list of zones based on any wildcard input.

  This function is intended to provide an easy method for producing a list
  of desired zones for a pipeline to run in.

  The Pipelines API default zone list is "any zone". The problem with
  "any zone" is that it can lead to incurring Cloud Storage egress charges
  if the GCE zone selected is in a different region than the GCS bucket.
  See https://cloud.google.com/storage/pricing#network-egress.

  A user with a multi-region US bucket would want pipelines to run in a
  "us-*" zone.
  A user with a regional bucket in the US would want to restrict pipelines
  to run in a zone in that region.

  Rarely does the specific zone matter for a pipeline.

  This function allows for a simple short-hand such as:
     [ "us-*" ]
     [
python
{ "resource": "" }
q257297
parse_rfc3339_utc_string
validation
def parse_rfc3339_utc_string(rfc3339_utc_string):
  """Converts a datestamp from RFC3339 UTC to a datetime.

  Args:
    rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format

  Returns:
    A datetime.
  """
  # The timestamps from the Google Operations are all in RFC3339 format, but
  # they are sometimes formatted to milliseconds, sometimes microseconds,
  # sometimes nanoseconds, and sometimes only seconds:
  # * 2016-11-14T23:05:56Z
  # * 2016-11-14T23:05:56.010Z
  # * 2016-11-14T23:05:56.010429Z
  # * 2016-11-14T23:05:56.010429380Z
  m = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).?(\d*)Z',
               rfc3339_utc_string)

  # It would be unexpected to get a different date format back from Google.
  # If we raise an exception here, we can break people completely.
  # Instead, let's just return None
python
{ "resource": "" }
q257298
get_operation_full_job_id
validation
def get_operation_full_job_id(op): """Returns the job-id or job-id.task-id for the operation.""" job_id = op.get_field('job-id') task_id =
python
{ "resource": "" }
q257299
_cancel_batch
validation
def _cancel_batch(batch_fn, cancel_fn, ops): """Cancel a batch of operations. Args: batch_fn: API-specific batch function. cancel_fn: API-specific cancel function. ops: A list of operations to cancel. Returns: A list of operations canceled and a list of error messages. """ # We define an inline callback which will populate a list of # successfully canceled operations as well as a list of operations # which were not successfully canceled. canceled = [] failed = [] def handle_cancel_response(request_id, response, exception): """Callback for the cancel response.""" del response # unused if exception: # We don't generally expect any failures here, except possibly trying # to cancel an operation that is already canceled or finished. # # If the operation is already finished, provide a clearer message than # "error 400: Bad Request". msg = 'error %s: %s' % (exception.resp.status, exception.resp.reason) if exception.resp.status == FAILED_PRECONDITION_CODE: detail = json.loads(exception.content) status = detail.get('error', {}).get('status') if status == FAILED_PRECONDITION_STATUS: msg = 'Not running' failed.append({'name': request_id, 'msg': msg}) else: canceled.append({'name': request_id}) return # Set up the batch object batch = batch_fn(callback=handle_cancel_response) # The callback gets a "request_id" which is the operation name.
python
{ "resource": "" }