text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def CaptureFrameLocals(self, frame):
  """Captures local variables and arguments of the specified frame.

  Args:
    frame: frame to capture locals and arguments.

  Returns:
    (arguments, locals) tuple.
  """
  # Capture everything visible in the frame (arguments included).
  captured = {}
  for var_name, var_value in six.viewitems(frame.f_locals):
    captured[var_name] = self.CaptureNamedVariable(
        var_name, var_value, 1, self.default_capture_limits)

  # Arguments occupy the first co_argcount slots of co_varnames, plus one
  # extra slot each for *args and **kwargs when present.
  code = frame.f_code
  arg_count = code.co_argcount
  arg_count += bool(code.co_flags & inspect.CO_VARARGS)
  arg_count += bool(code.co_flags & inspect.CO_VARKEYWORDS)

  # Split arguments out of the captured map, preserving declaration order.
  arguments = [captured.pop(arg_name)
               for arg_name in code.co_varnames[:arg_count]
               if arg_name in captured]

  return (arguments, list(six.viewvalues(captured)))
def CaptureNamedVariable(self, name, value, depth, limits):
  """Appends name to the product of CaptureVariable.

  Args:
    name: name of the variable.
    value: data to capture.
    depth: nested depth of dictionaries and vectors so far.
    limits: Per-object limits for capturing variable data.

  Returns:
    Formatted captured data as per Variable proto with name.
  """
  # str() on an arbitrary object may invoke user code; only stringify plain
  # objects (no __dict__) and fall back to the object id otherwise.
  if hasattr(name, '__dict__'):
    # TODO(vlif): call str(name) with immutability verifier here.
    name = str(id(name))
  else:
    name = str(name)
  self._total_size += len(name)

  variable = self.CheckDataVisiblity(value)
  if not variable:
    variable = self.CaptureVariable(value, depth, limits)
  variable['name'] = name
  return variable
def CheckDataVisiblity(self, value):
  """Returns a status object if the given value is not visible.

  Args:
    value: The value to check. The actual value here is not important but
        the value's metadata (e.g. package and type) will be checked.

  Returns:
    None if the value is visible. A variable structure with an error status
    if the value should not be visible.
  """
  policy = self.data_visibility_policy
  if not policy:
    # No policy configured; everything is visible.
    return None

  visible, reason = policy.IsDataVisible(DetermineType(value))
  if visible:
    return None

  return {
      'status': {
          'isError': True,
          'refersTo': 'VARIABLE_NAME',
          'description': {'format': reason}
      }
  }
def CaptureVariablesList(self, items, depth, empty_message, limits):
  """Captures list of named items.

  Args:
    items: iterable of (name, value) tuples.
    depth: nested depth of dictionaries and vectors for items.
    empty_message: info status message to set if items is empty.
    limits: Per-object limits for capturing variable data.

  Returns:
    List of formatted variable objects.
  """
  results = []
  for name, value in items:
    # Stop early if either the global size budget or the per-list item
    # count limit has been exhausted.
    over_size = self._total_size >= self.max_size
    over_count = len(results) >= limits.max_list_items
    if over_size or over_count:
      results.append({
          'status': {
              'refersTo': 'VARIABLE_VALUE',
              'description': {
                  'format': ('Only first $0 items were captured. Use in an '
                             'expression to see all items.'),
                  'parameters': [str(len(results))]}}})
      break
    results.append(self.CaptureNamedVariable(name, value, depth, limits))

  if results:
    return results

  # Nothing captured at all: emit an informational placeholder.
  return [{'status': {
      'refersTo': 'VARIABLE_NAME',
      'description': {'format': empty_message}}}]
def CaptureVariable(self, value, depth, limits, can_enqueue=True):
  """Try-Except wrapped version of CaptureVariableInternal."""
  try:
    return self.CaptureVariableInternal(value, depth, limits, can_enqueue)
  except BaseException as e:  # pylint: disable=broad-except
    # Capture must never propagate; surface the failure as an error status.
    error = {
        'isError': True,
        'refersTo': 'VARIABLE_VALUE',
        'description': {
            'format': 'Failed to capture variable: $0',
            'parameters': [str(e)]
        }
    }
    return {'status': error}
def _CaptureExpression(self, frame, expression):
  """Evaluates the expression and captures it into a Variable object.

  Args:
    frame: evaluation context.
    expression: watched expression to compile and evaluate.

  Returns:
    Variable object (which will have error status if the expression fails
    to evaluate).
  """
  ok, value = _EvaluateExpression(frame, expression)
  if ok:
    return self.CaptureNamedVariable(expression, value, 0,
                                     self.expression_capture_limits)
  # On failure, value holds the error status structure.
  return {'name': expression, 'status': value}
def TrimVariableTable(self, new_size):
  """Trims the variable table in the formatted breakpoint message.

  Removes trailing entries in variables table. Then scans the entire
  breakpoint message and replaces references to the trimmed variables to
  point to var_index of 0 ("buffer full").

  Args:
    new_size: desired size of variables table.
  """

  def Reindex(variables):
    """Redirects out-of-range varTableIndex references to entry 0."""
    # Iterative tree walk (members may nest arbitrarily deep).
    pending = list(variables)
    while pending:
      variable = pending.pop()
      index = variable.get('varTableIndex')
      if index is not None and index >= new_size:
        variable['varTableIndex'] = 0  # Buffer full.
      pending.extend(variable.get('members') or [])

  del self._var_table[new_size:]

  Reindex(self.breakpoint['evaluatedExpressions'])
  for stack_frame in self.breakpoint['stackFrames']:
    Reindex(stack_frame['arguments'])
    Reindex(stack_frame['locals'])
  Reindex(self._var_table)
def _CaptureEnvironmentLabels(self):
  """Captures information about the environment, if possible."""
  # Ensure the labels map exists before populating it.
  labels_map = self.breakpoint.setdefault('labels', {})
  if callable(breakpoint_labels_collector):
    for key, value in six.iteritems(breakpoint_labels_collector()):
      labels_map[key] = value
def _CaptureRequestLogId(self):
  """Captures the request log id if possible.

  The request log id is stored inside the breakpoint labels.
  """
  # pylint: disable=not-callable
  if not callable(request_log_id_collector):
    return
  request_log_id = request_log_id_collector()
  if request_log_id:
    # We have a request_log_id, save it into the breakpoint labels.
    self.breakpoint['labels'][labels.Breakpoint.REQUEST_LOG_ID] = (
        request_log_id)
def _CaptureUserId(self):
  """Captures the user id of the end user, if possible."""
  user_kind, user_id = user_id_collector()
  # Record the id only when both pieces are available.
  if not user_kind or not user_id:
    return
  self.breakpoint['evaluatedUserId'] = {'kind': user_kind, 'id': user_id}
def Log(self, frame):
  """Captures the minimal application states, formats it and logs the message.

  Args:
    frame: Python stack frame of breakpoint hit.

  Returns:
    None on success or status message on error.
  """
  # Return error if log methods were not configured globally.
  if not self._log_message:
    return {'isError': True,
            'description': {'format': LOG_ACTION_NOT_SUPPORTED}}

  # While in quota-recovery, suppress log output entirely until
  # quota_recovery_ms milliseconds have elapsed since quota was exhausted.
  if self._quota_recovery_start_time:
    ms_elapsed = (time.time() - self._quota_recovery_start_time) * 1000
    if ms_elapsed > self.quota_recovery_ms:
      # We are out of the recovery period, clear the time and continue.
      self._quota_recovery_start_time = None
    else:
      # We are in the recovery period, exit.
      return

  # Evaluate watched expressions and substitute them into the message format.
  message = 'LOGPOINT: ' + _FormatMessage(
      self._definition.get('logMessageFormat', ''),
      self._EvaluateExpressions(frame))

  line = self._definition['location']['line']

  # NOTE(review): this tuple is built and then explicitly deleted below
  # without ever being read — presumably a leftover from a logging call that
  # used to include the source location; confirm before removing.
  cdbg_logging_location = (NormalizePath(frame.f_code.co_filename), line,
                           _GetFrameCodeObjectName(frame))

  # Charge the message against the dynamic-log quota; on exhaustion, emit a
  # final out-of-quota notice and enter the recovery period.
  if native.ApplyDynamicLogsQuota(len(message)):
    self._log_message(message)
  else:
    self._quota_recovery_start_time = time.time()
    self._log_message(DYNAMIC_LOG_OUT_OF_QUOTA)

  del cdbg_logging_location

  return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _EvaluateExpressions(self, frame): """Evaluates watched expressions into a string form. If expression evaluation fails, the error message is used as evaluated expression string. Args: frame: Python stack frame of breakpoint hit. Returns: Array of strings where each string corresponds to the breakpoint expression with the same index. """
return [self._FormatExpression(frame, expression) for expression in self._definition.get('expressions') or []]
def _FormatExpression(self, frame, expression):
  """Evaluates a single watched expression and formats it into a string form.

  If expression evaluation fails, returns error message string.

  Args:
    frame: Python stack frame in which the expression is evaluated.
    expression: string expression to evaluate.

  Returns:
    Formatted expression value that can be used in the log message.
  """
  ok, value = _EvaluateExpression(frame, expression)
  if ok:
    return self._FormatValue(value)

  # Evaluation failed; render the error description in angle brackets.
  description = value['description']
  message = _FormatMessage(description['format'],
                           description.get('parameters'))
  return '<' + message + '>'
def _FormatValue(self, value, level=0):
  """Pretty-prints an object for a logger.

  This function is very similar to the standard pprint. The main difference
  is that it enforces limits to make sure we never produce an extremely long
  string or take too much time.

  Args:
    value: Python object to print.
    level: current recursion level.

  Returns:
    Formatted string.
  """

  def FormatDictItem(key_value):
    """Formats single dictionary item."""
    key, value = key_value
    return (self._FormatValue(key, level + 1) +
            ': ' +
            self._FormatValue(value, level + 1))

  def LimitedEnumerate(items, formatter, level=0):
    """Returns items in the specified enumerable enforcing threshold."""
    count = 0
    # Top-level lists use the larger max_list_items budget; nested
    # containers use the tighter max_sublist_items budget.
    limit = self.max_sublist_items if level > 0 else self.max_list_items
    for item in items:
      if count == limit:
        # Truncation marker; remaining items are dropped.
        yield '...'
        break

      yield formatter(item)
      count += 1

  def FormatList(items, formatter, level=0):
    """Formats a list using a custom item formatter enforcing threshold."""
    return ', '.join(LimitedEnumerate(items, formatter, level=level))

  if isinstance(value, _PRIMITIVE_TYPES):
    return _TrimString(repr(value),  # Primitive type, always immutable.
                       self.max_value_len)

  if isinstance(value, _DATE_TYPES):
    return str(value)

  # Beyond the depth limit, show only the type instead of recursing.
  if level > self.max_depth:
    return str(type(value))

  if isinstance(value, dict):
    return '{' + FormatList(six.iteritems(value), FormatDictItem) + '}'

  if isinstance(value, _VECTOR_TYPES):
    return _ListTypeFormatString(value).format(FormatList(
        value, lambda item: self._FormatValue(item, level + 1), level=level))

  if isinstance(value, types.FunctionType):
    return 'function ' + value.__name__

  # Arbitrary objects are rendered through their attribute dictionary.
  if hasattr(value, '__dict__') and value.__dict__:
    return self._FormatValue(value.__dict__, level)

  return str(type(value))
def OpenAndRead(relative_path='debugger-blacklist.yaml'):
  """Attempts to find the yaml configuration file, then read it.

  Args:
    relative_path: Optional relative path override.

  Returns:
    A Config object if the open and read were successful, None if the file
    does not exist (which is not considered an error).

  Raises:
    Error (some subclass): As thrown by the called Read() function.
  """
  # Note: This logic follows the convention established by source-context.json
  config_path = os.path.join(sys.path[0], relative_path)
  try:
    with open(config_path) as config_file:
      return Read(config_file)
  except IOError:
    # A missing config file simply means no config; not an error.
    return None
def Read(f):
  """Reads and returns Config data from a yaml file.

  Args:
    f: Yaml file to parse.

  Returns:
    Config object as defined in this file.

  Raises:
    Error (some subclass): If there is a problem loading or parsing the file.
  """
  try:
    # Security fix: safe_load restricts parsing to plain YAML data types.
    # yaml.load without an explicit Loader can instantiate arbitrary Python
    # objects from the file and is deprecated in PyYAML >= 5.1; this config
    # only ever contains lists of string patterns.
    yaml_data = yaml.safe_load(f)
  except yaml.YAMLError as e:
    raise ParseError('%s' % e)
  except IOError as e:
    raise YAMLLoadError('%s' % e)

  _CheckData(yaml_data)

  try:
    return Config(
        yaml_data.get('blacklist', ()),
        # Bug fix: the default was ('*'), which is just the string '*', not
        # a tuple. The default whitelist must be a one-element tuple so it
        # behaves like the list values read from the file.
        yaml_data.get('whitelist', ('*',)))
  except UnicodeDecodeError as e:
    raise YAMLLoadError('%s' % e)
def _CheckData(yaml_data):
  """Checks data for illegal keys and formatting."""
  # Anything other than the two known keys is a configuration error.
  unknown_keys = set(yaml_data) - set(('blacklist', 'whitelist'))
  if unknown_keys:
    raise UnknownConfigKeyError(
        'Unknown keys in configuration: %s' % unknown_keys)

  for key, data in six.iteritems(yaml_data):
    _AssertDataIsList(key, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _AssertDataIsList(key, lst): """Assert that lst contains list data and is not structured."""
# list and tuple are supported. Not supported are direct strings # and dictionary; these indicate too much or two little structure. if not isinstance(lst, list) and not isinstance(lst, tuple): raise NotAListError('%s must be a list' % key) # each list entry must be a string for element in lst: if not isinstance(element, str): raise ElementNotAStringError('Unsupported list element %s found in %s', (element, lst))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _StripCommonPathPrefix(paths): """Removes path common prefix from a list of path strings."""
# Find the longest common prefix in terms of characters. common_prefix = os.path.commonprefix(paths) # Truncate at last segment boundary. E.g. '/aa/bb1/x.py' and '/a/bb2/x.py' # have '/aa/bb' as the common prefix, but we should strip '/aa/' instead. # If there's no '/' found, returns -1+1=0. common_prefix_len = common_prefix.rfind('/') + 1 return [path[common_prefix_len:] for path in paths]
def _MultipleModulesFoundError(path, candidates):
  """Generates an error message to be used when multiple matches are found.

  Args:
    path: The breakpoint location path that the user provided.
    candidates: List of paths that match the user provided path. Must
        contain at least 2 entries (throws AssertionError otherwise).

  Returns:
    A (format, parameters) tuple that should be used in the description
    field of the breakpoint error status.
  """
  assert len(candidates) > 1

  # Always show the user path plus the first two (prefix-stripped) matches.
  params = [path] + _StripCommonPathPrefix(candidates[:2])
  if len(candidates) > 2:
    # More than two matches: append the count of remaining candidates.
    params.append(str(len(candidates) - 2))
    return ERROR_LOCATION_MULTIPLE_MODULES_4, params
  return ERROR_LOCATION_MULTIPLE_MODULES_3, params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _NormalizePath(path): """Removes surrounding whitespace, leading separator and normalize."""
# TODO(emrekultursay): Calling os.path.normpath "may change the meaning of a # path that contains symbolic links" (e.g., "A/foo/../B" != "A/B" if foo is a # symlink). This might cause trouble when matching against loaded module # paths. We should try to avoid using it. # Example: # > import symlink.a # > symlink.a.__file__ # symlink/a.py # > import target.a # > starget.a.__file__ # target/a.py # Python interpreter treats these as two separate modules. So, we also need to # handle them the same way. return os.path.normpath(path.strip().lstrip(os.sep))
def Clear(self):
  """Clears the breakpoint and releases all breakpoint resources.

  This function is assumed to be called by BreakpointsManager. Therefore we
  don't call CompleteBreakpoint from here.
  """
  self._RemoveImportHook()

  # Detach the native breakpoint cookie first so it cannot be cleared twice.
  cookie, self._cookie = self._cookie, None
  if cookie is not None:
    native.LogInfo('Clearing breakpoint %s' % self.GetBreakpointId())
    native.ClearConditionalBreakpoint(cookie)

  self._completed = True
def GetExpirationTime(self):
  """Computes the timestamp at which this breakpoint will expire."""
  # TODO(emrekultursay): Move this to a common method.
  # The backend emits times like '2020-01-01T00:00:00Z', optionally with a
  # fractional-seconds part; rewrite the trailing 'Z' so %Z can parse it.
  create_time = self.definition['createTime'].replace('Z', 'UTC')
  fmt = ('%Y-%m-%dT%H:%M:%S.%f%Z' if '.' in create_time
         else '%Y-%m-%dT%H:%M:%S%Z')
  return datetime.strptime(create_time, fmt) + self.expiration_period
def ExpireBreakpoint(self):
  """Expires this breakpoint."""
  # Let only one thread capture the data and complete the breakpoint.
  if not self._SetCompleted():
    return

  is_logpoint = self.definition.get('action') == 'LOG'
  message = (ERROR_AGE_LOGPOINT_EXPIRED_0 if is_logpoint
             else ERROR_AGE_SNAPSHOT_EXPIRED_0)

  self._CompleteBreakpoint({
      'status': {
          'isError': True,
          'refersTo': 'BREAKPOINT_AGE',
          'description': {'format': message}}})
def _ActivateBreakpoint(self, module):
  """Sets the breakpoint in the loaded module, or complete with error.

  Args:
    module: loaded module object the breakpoint location resolves into.
  """
  # First remove the import hook (if installed).
  self._RemoveImportHook()

  line = self.definition['location']['line']

  # Find the code object in which the breakpoint is being set.
  # On failure, codeobj instead carries up to two alternative line numbers
  # near the requested line (see the params handling below).
  status, codeobj = module_explorer.GetCodeObjectAtLine(module, line)
  if not status:
    # First two parameters are common: the line of the breakpoint and the
    # module we are trying to insert the breakpoint in.
    # TODO(emrekultursay): Do not display the entire path of the file. Either
    # strip some prefix, or display the path in the breakpoint.
    params = [str(line), os.path.splitext(module.__file__)[0] + '.py']

    # The next 0, 1, or 2 parameters are the alternative lines to set the
    # breakpoint at, displayed for the user's convenience.
    alt_lines = (str(l) for l in codeobj if l is not None)
    params += alt_lines

    # Pick the message variant matching the number of alternatives found.
    if len(params) == 4:
      fmt = ERROR_LOCATION_NO_CODE_FOUND_AT_LINE_4
    elif len(params) == 3:
      fmt = ERROR_LOCATION_NO_CODE_FOUND_AT_LINE_3
    else:
      fmt = ERROR_LOCATION_NO_CODE_FOUND_AT_LINE_2

    self._CompleteBreakpoint({
        'status': {
            'isError': True,
            'refersTo': 'BREAKPOINT_SOURCE_LOCATION',
            'description': {
                'format': fmt,
                'parameters': params}}})
    return

  # Compile the breakpoint condition (if any) once, up front, so condition
  # errors surface immediately rather than on first hit.
  condition = None
  if self.definition.get('condition'):
    try:
      condition = compile(self.definition.get('condition'),
                          '<condition_expression>',
                          'eval')
    except (TypeError, ValueError) as e:
      # condition string contains null bytes.
      self._CompleteBreakpoint({
          'status': {
              'isError': True,
              'refersTo': 'BREAKPOINT_CONDITION',
              'description': {
                  'format': 'Invalid expression',
                  'parameters': [str(e)]}}})
      return
    except SyntaxError as e:
      self._CompleteBreakpoint({
          'status': {
              'isError': True,
              'refersTo': 'BREAKPOINT_CONDITION',
              'description': {
                  'format': 'Expression could not be compiled: $0',
                  'parameters': [e.msg]}}})
      return

  native.LogInfo('Creating new Python breakpoint %s in %s, line %d' % (
      self.GetBreakpointId(), codeobj, line))

  # Arm the native breakpoint; the returned cookie is used by Clear().
  self._cookie = native.SetConditionalBreakpoint(
      codeobj, line, condition, self._BreakpointEvent)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _CompleteBreakpoint(self, data, is_incremental=True): """Sends breakpoint update and deactivates the breakpoint."""
if is_incremental: data = dict(self.definition, **data) data['isFinalState'] = True self._hub_client.EnqueueBreakpointUpdate(data) self._breakpoints_manager.CompleteBreakpoint(self.GetBreakpointId()) self.Clear()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _SetCompleted(self): """Atomically marks the breakpoint as completed. Returns: True if the breakpoint wasn't marked already completed or False if the breakpoint was already completed. """
with self._lock: if self._completed: return False self._completed = True return True
def _BreakpointEvent(self, event, frame):
  """Callback invoked by cdbg_native when breakpoint hits.

  Args:
    event: breakpoint event (see kIntegerConstants in native_module.cc).
    frame: Python stack frame of breakpoint hit or None for other events.
  """
  error_status = None

  if event != native.BREAKPOINT_EVENT_HIT:
    error_status = _BREAKPOINT_EVENT_STATUS[event]
  elif self.definition.get('action') == 'LOG':
    error_status = self._collector.Log(frame)
    if not error_status:
      return  # Log action successful, no need to clear the breakpoint.

  # Let only one thread capture the data and complete the breakpoint.
  if not self._SetCompleted():
    return

  self.Clear()

  if error_status:
    self._CompleteBreakpoint({'status': error_status})
    return

  collector = capture_collector.CaptureCollector(
      self.definition, self.data_visibility_policy)
  # TODO(b/69119299): This is a temporary try/except. All exceptions should be
  # caught inside Collect and converted into breakpoint error messages.
  try:
    collector.Collect(frame)
  except BaseException as e:  # pylint: disable=broad-except
    # Bug fix: a bare "except:" clause used to follow this handler with an
    # "Unknown internal error" message; it was unreachable dead code because
    # "except BaseException" already catches every exception.
    native.LogInfo('Internal error during data capture: %s' % repr(e))
    error_status = {'isError': True,
                    'description': {
                        'format': ('Internal error while capturing data: %s' %
                                   repr(e))}}
    self._CompleteBreakpoint({'status': error_status})
    return

  self._CompleteBreakpoint(collector.breakpoint, is_incremental=False)
def Search(path):
  """Searches sys.path for a source file that matches path.

  The input path may carry an unknown number of irrelevant leading
  directories, so progressively shorter suffixes of the path are tried
  ('a/b/c', then 'b/c', then 'c') against every sys.path entry, with the
  extensions .pyo, .pyc and .py. The first existing file wins; if nothing
  matches, the input path is returned unchanged.

  Args:
    path: Path that describes a source file. Must contain the .py file
        extension. Must not contain any leading os.sep character.

  Returns:
    Full path to the matched source file, if a match is found. Otherwise,
    returns the input path.

  Raises:
    AssertionError: if the provided path is an absolute path, or if it does
        not have a .py extension.
  """
  # Verify that the os.sep is already stripped from the input.
  assert not path.startswith(os.sep)

  # Strip the file extension, it will not be needed.
  src_root, src_ext = os.path.splitext(path)
  assert src_ext == '.py'

  # Build the suffix candidates longest-first: 'a/b/c', 'b/c', 'c'.
  suffixes = []
  suffix = src_root
  while suffix:
    suffixes.append(suffix)
    suffix = suffix.partition(os.sep)[2]

  for src_part in suffixes:
    # Search in sys.path order, giving priority to earlier entries.
    for sys_path in sys.path:
      base = os.path.join(sys_path, src_part)
      # The order in which the extensions are tried does not matter.
      for ext in ('.pyo', '.pyc', '.py'):
        candidate = base + ext
        # os.path.exists follows symlinks and flattens relative components.
        if os.path.exists(candidate):
          # Return the path exactly as constructed (possibly symlinked and
          # non-flattened) so it matches how the file appears in sys.modules.
          return candidate

  # A matching file was not found in sys.path directories.
  return path
def _StartDebugger():
  """Configures and starts the debugger.

  Initializes the native extension with the parsed flags, wires the hub
  client to the breakpoints manager and starts the hub client background
  operation. Populates the _hub_client and _breakpoints_manager module
  globals.
  """
  global _hub_client
  global _breakpoints_manager

  cdbg_native.InitializeModule(_flags)

  _hub_client = gcp_hub_client.GcpHubClient()

  # Optional data-visibility policy loaded from the on-disk config (if any).
  visibility_policy = _GetVisibilityPolicy()
  _breakpoints_manager = breakpoints_manager.BreakpointsManager(
      _hub_client, visibility_policy)

  # Set up loggers for logpoints.
  capture_collector.SetLogger(logging.getLogger())
  capture_collector.CaptureCollector.pretty_printers.append(
      appengine_pretty_printers.PrettyPrinter)

  # Route hub callbacks to the breakpoints manager: breakpoint list updates
  # and idle-time expiration checks.
  _hub_client.on_active_breakpoints_changed = (
      _breakpoints_manager.SetActiveBreakpoints)
  _hub_client.on_idle = _breakpoints_manager.CheckBreakpointsExpiration
  _hub_client.SetupAuth(
      _flags.get('project_id'),
      _flags.get('project_number'),
      _flags.get('service_account_json_file'))
  _hub_client.InitializeDebuggeeLabels(_flags)
  _hub_client.Start()
def _GetVisibilityPolicy():
  """If a debugger configuration is found, create a visibility policy."""
  try:
    config = yaml_data_visibility_config_reader.OpenAndRead()
  except yaml_data_visibility_config_reader.Error as err:
    # A broken config must not be silently ignored: install a policy that
    # reports the configuration error instead.
    return error_data_visibility_policy.ErrorDataVisibilityPolicy(
        'Could not process debugger config: %s' % err)

  if not config:
    # No config file present; no visibility restrictions.
    return None

  return glob_data_visibility_policy.GlobDataVisibilityPolicy(
      config.blacklist_patterns, config.whitelist_patterns)
def _DebuggerMain():
  """Starts the debugger and runs the application with debugger attached."""
  global _flags

  # The first argument is the cdbg module itself; discard it.
  del sys.argv[0]

  # Parse debugger flags until we encounter '--'.
  _flags = {}
  while sys.argv[0]:
    arg = sys.argv[0]
    del sys.argv[0]

    if arg == '--':
      break

    # Bug fix: use maxsplit=1 instead of 2, so flag values containing '='
    # (e.g. --opt=a=b) parse as name='opt', value='a=b' instead of failing
    # to unpack a 3-element split result.
    (name, value) = arg.strip('-').split('=', 1)
    _flags[name] = value

  _StartDebugger()

  # Run the app. The following code was mostly copied from pdb.py.
  app_path = sys.argv[0]

  sys.path[0] = os.path.dirname(app_path)

  import __main__  # pylint: disable=g-import-not-at-top
  # Reset __main__ so the target application sees a pristine module, as if
  # it had been launched directly.
  __main__.__dict__.clear()
  __main__.__dict__.update({'__name__': '__main__',
                            '__file__': app_path,
                            '__builtins__': __builtins__})
  locals = globals = __main__.__dict__  # pylint: disable=redefined-builtin
  sys.modules['__main__'] = __main__

  with open(app_path) as f:
    code = compile(f.read(), app_path, 'exec')
  exec(code, globals, locals)  # pylint: disable=exec-used
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _Matches(path, pattern_list): """Returns true if path matches any patten found in pattern_list. Args: path: A dot separated path to a package, class, method or variable pattern_list: A list of wildcard patterns Returns: True if path matches any wildcard found in pattern_list. """
# Note: This code does not scale to large pattern_list sizes. return any(fnmatch.fnmatchcase(path, pattern) for pattern in pattern_list)
def SetActiveBreakpoints(self, breakpoints_data):
  """Adds new breakpoints and removes missing ones.

  Args:
    breakpoints_data: updated list of active breakpoints.
  """
  with self._lock:
    ids = set([x['id'] for x in breakpoints_data])

    # Clear breakpoints that no longer show up in active breakpoints list.
    for breakpoint_id in six.viewkeys(self._active) - ids:
      self._active.pop(breakpoint_id).Clear()

    # Create new breakpoints: those in the incoming list that are neither
    # already active nor already completed locally.
    self._active.update([
        (x['id'],
         python_breakpoint.PythonBreakpoint(
             x, self._hub_client, self, self.data_visibility_policy))
        for x in breakpoints_data
        if x['id'] in ids - six.viewkeys(self._active) - self._completed])

    # Remove entries from completed_breakpoints_ that weren't listed in
    # breakpoints_data vector. These are confirmed to have been removed by
    # the hub and the debuglet can now assume that they will never show up
    # ever again. The backend never reuses breakpoint IDs.
    self._completed &= ids

    # Force the next idle-time expiration check to recompute the deadline
    # when there are active breakpoints; disable it entirely otherwise.
    if self._active:
      self._next_expiration = datetime.min  # Not known.
    else:
      self._next_expiration = datetime.max
def CompleteBreakpoint(self, breakpoint_id):
  """Marks the specified breakpoint as completed.

  Appends the ID to set of completed breakpoints and clears it.

  Args:
    breakpoint_id: breakpoint ID to complete.
  """
  with self._lock:
    self._completed.add(breakpoint_id)

    # Release the breakpoint's resources if it is still active.
    active_breakpoint = self._active.pop(breakpoint_id, None)
    if active_breakpoint is not None:
      active_breakpoint.Clear()
def CheckBreakpointsExpiration(self):
  """Completes all breakpoints that have been active for too long."""
  with self._lock:
    current_time = BreakpointsManager.GetCurrentTime()
    # Cheap early-out: nothing can have expired before the cached deadline.
    if self._next_expiration > current_time:
      return

    expired_breakpoints = []
    # Recompute the earliest future expiration while collecting the
    # already-expired breakpoints.
    self._next_expiration = datetime.max
    for breakpoint in six.itervalues(self._active):
      expiration_time = breakpoint.GetExpirationTime()
      if expiration_time <= current_time:
        expired_breakpoints.append(breakpoint)
      else:
        self._next_expiration = min(self._next_expiration, expiration_time)

  # Expire outside of the lock: ExpireBreakpoint eventually calls back into
  # CompleteBreakpoint, which re-acquires self._lock.
  for breakpoint in expired_breakpoints:
    breakpoint.ExpireBreakpoint()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def PrettyPrinter(obj): """Pretty printers for AppEngine objects."""
# NDB model: expose its stored properties via to_dict().
if ndb and isinstance(obj, ndb.Model):
  return six.iteritems(obj.to_dict()), 'ndb.Model(%s)' % type(obj).__name__

# protorpc enum: show the symbolic name alongside the numeric value.
if messages and isinstance(obj, messages.Enum):
  return [('name', obj.name), ('number', obj.number)], type(obj).__name__

# Not a recognized App Engine type; fall back to default formatting.
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def IsPathSuffix(mod_path, path): """Checks whether path is a full path suffix of mod_path. Args: mod_path: Must be an absolute path to a source file. Must not have file extension. path: A relative path. Must not have file extension. Returns: True if path is a full path suffix of mod_path. False otherwise. """
# The os.sep check ensures the suffix starts on a path-component boundary:
# 'foo/a/b' matches 'a/b', but 'fooa/b' does not.
return (mod_path.endswith(path) and
        (len(mod_path) == len(path) or
         mod_path[:-len(path)].endswith(os.sep)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetLoadedModuleBySuffix(path): """Searches sys.modules to find a module with the given file path. Args: path: Path to the source file. It can be relative or absolute, as suffix match can handle both. If absolute, it must have already been sanitized. Algorithm: The given path must be a full suffix of a loaded module to be a valid match. File extensions are ignored when performing suffix match. Example: path: 'a/b/c.py' modules: {'a': 'a.py', 'a.b': 'a/b.py', 'a.b.c': 'a/b/c.pyc'} returns: module('a.b.c') Returns: The module that corresponds to path, or None if such module was not found. """
# Strip the extension so .py/.pyc/.pyo differences don't break the match.
root = os.path.splitext(path)[0]
for module in sys.modules.values():
  mod_root = os.path.splitext(getattr(module, '__file__', None) or '')[0]
  if not mod_root:
    continue  # Built-in or extension module without a backing source file.

  # While mod_root can contain symlinks, we cannot eliminate them. This is
  # because, we must perform exactly the same transformations on mod_root and
  # path, yet path can be relative to an unknown directory which prevents
  # identifying and eliminating symbolic links.
  #
  # Therefore, we only convert relative to absolute path.
  if not os.path.isabs(mod_root):
    mod_root = os.path.join(os.getcwd(), mod_root)

  if IsPathSuffix(mod_root, root):
    return module

return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetCodeObjectAtLine(module, line): """Searches for a code object at the specified line in the specified module. Args: module: module to explore. line: 1-based line number of the statement. Returns: (True, Code object) on success or (False, (prev_line, next_line)) on failure, where prev_line and next_line are the closest lines with code above and below the specified line, or None if they do not exist. """
if not hasattr(module, '__file__'):
  return (False, (None, None))  # No source file; nothing to search.

# Sentinels: 0 is below any real line, six.MAXSIZE above any real line.
prev_line = 0
next_line = six.MAXSIZE
for code_object in _GetModuleCodeObjects(module):
  for co_line_number in _GetLineNumbers(code_object):
    if co_line_number == line:
      return (True, code_object)
    elif co_line_number < line:
      prev_line = max(prev_line, co_line_number)
    elif co_line_number > line:
      next_line = min(next_line, co_line_number)
      # Line numbers within a code object increase monotonically; no closer
      # candidate can follow in this code object.
      break

# Map untouched sentinels back to None for the caller.
prev_line = None if prev_line == 0 else prev_line
next_line = None if next_line == six.MAXSIZE else next_line
return (False, (prev_line, next_line))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _GetLineNumbers(code_object): """Generator for getting the line numbers of a code object. Args: code_object: the code object. Yields: The next line number in the code object. """
# Get the line number deltas, which are the odd number entries, from the # lnotab. See # https://svn.python.org/projects/python/branches/pep-0384/Objects/lnotab_notes.txt # In Python 3, this is just a byte array. In Python 2 it is a string so the # numerical values have to be extracted from the individual characters. if six.PY3: line_incrs = code_object.co_lnotab[1::2] else: line_incrs = (ord(c) for c in code_object.co_lnotab[1::2]) current_line = code_object.co_firstlineno for line_incr in line_incrs: current_line += line_incr yield current_line
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _GetModuleCodeObjects(module): """Gets all code objects defined in the specified module. There are two BFS traversals involved. One in this function and the other in _FindCodeObjectsReferents. Only the BFS in _FindCodeObjectsReferents has a depth limit. This function does not. The motivation is that this function explores code object of the module and they can have any arbitrary nesting level. _FindCodeObjectsReferents, on the other hand, traverses through class definitions and random references. It's much more expensive and will likely go into unrelated objects. There is also a limit on how many total objects are going to be traversed in all. This limit makes sure that if something goes wrong, the lookup doesn't hang. Args: module: module to explore. Returns: Set of code objects defined in module. """
# Alternate between generic referent traversal and explicit expansion of
# each code object's constants (nested code objects live in co_consts).
visit_recorder = _VisitRecorder()
current = [module]
code_objects = set()
while current:
  current = _FindCodeObjectsReferents(module, current, visit_recorder)
  code_objects |= current

  # Unfortunately Python code objects don't implement tp_traverse, so this
  # type can't be used with gc.get_referents. The workaround is to get the
  # relevant objects explicitly here.
  current = [code_object.co_consts for code_object in current]
return code_objects
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _FindCodeObjectsReferents(module, start_objects, visit_recorder): """Looks for all the code objects referenced by objects in start_objects. The traversal implemented by this function is a shallow one. In other words if the reference chain is a -> b -> co1 -> c -> co2, this function will return [co1] only. The traversal is implemented with BFS. The maximum depth is limited to avoid touching all the objects in the process. Each object is only visited once using visit_recorder. Args: module: module in which we are looking for code objects. start_objects: initial set of objects for the BFS traversal. visit_recorder: instance of _VisitRecorder class to ensure each object is visited at most once. Returns: List of code objects. """
def CheckIgnoreCodeObject(code_object): """Checks if the code object can be ignored. Code objects that are not implemented in the module, or are from a lambda or generator expression can be ignored. If the module was precompiled, the code object may point to .py file, while the module says that it originated from .pyc file. We just strip extension altogether to work around it. Args: code_object: code object that we want to check against module. Returns: True if the code object can be ignored, False otherwise. """ if code_object.co_name in ('<lambda>', '<genexpr>'): return True code_object_file = os.path.splitext(code_object.co_filename)[0] module_file = os.path.splitext(module.__file__)[0] # The simple case. if code_object_file == module_file: return False return True def CheckIgnoreClass(cls): """Returns True if the class is definitely not coming from "module".""" cls_module = sys.modules.get(cls.__module__) if not cls_module: return False # We can't tell for sure, so explore this class. return ( cls_module is not module and getattr(cls_module, '__file__', None) != module.__file__) code_objects = set() current = start_objects for obj in current: visit_recorder.Record(current) depth = 0 while current and depth < _MAX_REFERENTS_BFS_DEPTH: new_current = [] for current_obj in current: referents = gc.get_referents(current_obj) if (current_obj is not module.__dict__ and len(referents) > _MAX_OBJECT_REFERENTS): continue for obj in referents: if isinstance(obj, _BFS_IGNORE_TYPES) or not visit_recorder.Record(obj): continue if isinstance(obj, types.CodeType) and CheckIgnoreCodeObject(obj): continue if isinstance(obj, six.class_types) and CheckIgnoreClass(obj): continue if isinstance(obj, types.CodeType): code_objects.add(obj) else: new_current.append(obj) current = new_current depth += 1 return code_objects
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Record(self, obj): """Records the object as visited. Args: obj: visited object. Returns: True if the object hasn't been previously visited or False if it has already been recorded or the quota has been exhausted. """
# Stop recording (and therefore traversal) once the global quota is spent.
if len(self._visit_recorder_objects) >= _MAX_VISIT_OBJECTS:
  return False

obj_id = id(obj)
if obj_id in self._visit_recorder_objects:
  return False

# Keep a strong reference so the id() is not recycled by a new object while
# the traversal is still in progress.
self._visit_recorder_objects[obj_id] = obj
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Failed(self): """Indicates that a request has failed. Returns: Time interval to wait before retrying (in seconds). """
# Return the current backoff, then grow it geometrically up to the cap.
interval = self._current_interval_sec
self._current_interval_sec = min(
    self.max_interval_sec,
    self._current_interval_sec * self.multiplier)
return interval
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ComputeApplicationUniquifier(hash_obj): """Computes hash of application files. Application files can be anywhere on the disk. The application is free to import a Python module from an arbitrary path ok the disk. It is also impossible to distinguish application files from third party libraries. Third party libraries are typically installed with "pip" and there is not a good way to guarantee that all instances of the application are going to have exactly the same version of each package. There is also a huge amount of files in all sys.path directories and it will take too much time to traverse them all. We therefore make an assumption that application files are only located in sys.path[0]. When traversing files in sys.path, we can expect both .py and .pyc files. For source deployment, we will find both .py and .pyc files. In this case we will only index .py files and ignored .pyc file. In case of binary deployment, only .pyc file will be there. The naive way to hash files would be to read the file content and compute some sort of a hash (e.g. SHA1). This can be expensive as well, so instead we just hash file name and file size. It is a good enough heuristics to identify modified files across different deployments. Args: hash_obj: hash aggregator to update with application uniquifier. """
def ProcessDirectory(path, relative_path, depth=1):
  """Recursively computes application uniquifier for a particular directory.

  Args:
    path: absolute path of the directory to start.
    relative_path: path relative to sys.path[0]
    depth: current recursion depth.
  """
  if depth > _MAX_DEPTH:
    return  # Guard against pathological nesting or symlink cycles.

  try:
    names = os.listdir(path)
  except BaseException:
    return  # Unreadable directory contributes nothing to the hash.

  # Sort file names to ensure consistent hash regardless of order returned
  # by os.listdir. This will also put .py files before .pyc and .pyo files.
  modules = set()
  for name in sorted(names):
    current_path = os.path.join(path, name)
    if not os.path.isdir(current_path):
      file_name, ext = os.path.splitext(name)
      if ext not in ('.py', '.pyc', '.pyo'):
        continue  # This is not an application file.

      if file_name in modules:
        continue  # This is a .pyc file and we already indexed .py file.

      modules.add(file_name)
      ProcessApplicationFile(current_path, os.path.join(relative_path, name))
    elif IsPackage(current_path):
      ProcessDirectory(current_path,
                       os.path.join(relative_path, name),
                       depth + 1)

def IsPackage(path):
  """Checks if the specified directory is a valid Python package."""
  init_base_path = os.path.join(path, '__init__.py')
  return (os.path.isfile(init_base_path) or
          os.path.isfile(init_base_path + 'c') or
          os.path.isfile(init_base_path + 'o'))

def ProcessApplicationFile(path, relative_path):
  """Updates the hash with the specified application file."""
  # Hash only the relative name and file size: cheap, yet good enough to
  # detect modified files across deployments (see function docstring).
  hash_obj.update(relative_path.encode())
  hash_obj.update(':'.encode())
  try:
    hash_obj.update(str(os.stat(path).st_size).encode())
  except BaseException:
    pass  # Best effort: a file that vanished mid-scan is simply skipped.
  hash_obj.update('\n'.encode())

# Application files are assumed to live under sys.path[0] only.
ProcessDirectory(sys.path[0], '')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AddImportCallbackBySuffix(path, callback): """Register import hook. This function overrides the default import process. Then whenever a module whose suffix matches path is imported, the callback will be invoked. A module may be imported multiple times. Import event only means that the Python code contained an "import" statement. The actual loading and initialization of a new module normally happens only once, at which time the callback will be invoked. This function does not validates the existence of such a module and it's the responsibility of the caller. TODO(erezh): handle module reload. Args: path: python module file path. It may be missing the directories for the outer packages, and therefore, requires suffix comparison to match against loaded modules. If it contains all outer packages, it may contain the sys.path as well. It might contain an incorrect file extension (e.g., py vs. pyc). callback: callable to invoke upon module load. Returns: Function object to invoke to remove the installed callback. """
def RemoveCallback():
  # This is a read-if-del operation on _import_callbacks. Lock to prevent
  # callbacks from being inserted just before the key is deleted. Thus, it
  # must be locked also when inserting a new entry below. On the other hand
  # read only access, in the import hook, does not require a lock.
  with _import_callbacks_lock:
    callbacks = _import_callbacks.get(path)
    if callbacks:
      callbacks.remove(callback)
      if not callbacks:
        del _import_callbacks[path]  # Drop empty entries entirely.

with _import_callbacks_lock:
  _import_callbacks.setdefault(path, set()).add(callback)
# Idempotent: installs the hook on first use only.
_InstallImportHookBySuffix()

return RemoveCallback
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _InstallImportHookBySuffix(): """Lazily installs import hook."""
global _real_import

if _real_import:
  return  # Import hook already installed

# Save the original importer so the hook can delegate to it.
_real_import = getattr(builtins, '__import__')
assert _real_import
builtins.__import__ = _ImportHookBySuffix

if six.PY3:
  # In Python 2, importlib.import_module calls __import__ internally so
  # overriding __import__ is enough. In Python 3, they are separate so it also
  # needs to be overwritten.
  global _real_import_module
  _real_import_module = importlib.import_module
  assert _real_import_module
  importlib.import_module = _ImportModuleHookBySuffix
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _IncrementNestLevel(): """Increments the per thread nest level of imports."""
# This is the top call to import (no nesting), init the per-thread nest level # and names set. if getattr(_import_local, 'nest_level', None) is None: _import_local.nest_level = 0 if _import_local.nest_level == 0: # Re-initialize names set at each top-level import to prevent any # accidental unforeseen memory leak. _import_local.names = set() _import_local.nest_level += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ProcessImportBySuffix(name, fromlist, globals): """Processes an import. Calculates the possible names generated from an import and invokes registered callbacks if needed. Args: name: Argument as passed to the importer. fromlist: Argument as passed to the importer. globals: Argument as passed to the importer. """
_import_local.nest_level -= 1 # To improve common code path performance, compute the loaded modules only # if there are any import callbacks. if _import_callbacks: # Collect the names of all modules that might be newly loaded as a result # of this import. Add them in a thread-local list. _import_local.names |= _GenerateNames(name, fromlist, globals) # Invoke the callbacks only on the top-level import call. if _import_local.nest_level == 0: _InvokeImportCallbackBySuffix(_import_local.names) # To be safe, we clear the names set every time we exit a top level import. if _import_local.nest_level == 0: _import_local.names.clear()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ImportHookBySuffix( name, globals=None, locals=None, fromlist=None, level=None): """Callback when an import statement is executed by the Python interpreter. Argument names have to exactly match those of __import__. Otherwise calls to __import__ that use keyword syntax will fail: __import('a', fromlist=[]). """
_IncrementNestLevel()

if level is None:
  # A level of 0 means absolute import, positive values means relative
  # imports, and -1 means to try both an absolute and relative import.
  # Since imports were disambiguated in Python 3, -1 is not a valid value.
  # The default values are 0 and -1 for Python 3 and 2 respectively.
  # https://docs.python.org/2/library/functions.html#__import__
  # https://docs.python.org/3/library/functions.html#__import__
  level = 0 if six.PY3 else -1

try:
  # Really import modules.
  module = _real_import(name, globals, locals, fromlist, level)
finally:
  # This _real_import call may raise an exception (e.g., ImportError).
  # However, there might be several modules already loaded before the
  # exception was raised. For instance:
  #   a.py
  #     import b  # success
  #     import c  # ImportError exception.
  # In this case, an 'import a' statement would have the side effect of
  # importing module 'b'. This should trigger the import hooks for module
  # 'b'. To achieve this, we always search/invoke import callbacks (i.e.,
  # even when an exception is raised).
  #
  # Important Note: Do not use 'return' inside the finally block. It will
  # cause any pending exception to be discarded.
  _ProcessImportBySuffix(name, fromlist, globals)
return module
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ResolveRelativeImport(name, package): """Resolves a relative import into an absolute path. This is mostly an adapted version of the logic found in the backported version of import_module in Python 2.7. https://github.com/python/cpython/blob/2.7/Lib/importlib/__init__.py Args: name: relative name imported, such as '.a' or '..b.c' package: absolute package path, such as 'a.b.c.d.e' Returns: The absolute path of the name to be imported, or None if it is invalid. Examples: _ResolveRelativeImport('.c', 'a.b') -> 'a.b.c' _ResolveRelativeImport('..c', 'a.b') -> 'a.c' """
# Count leading dots; each dot beyond the first climbs one package level.
level = sum(1 for c in itertools.takewhile(lambda c: c == '.', name))
if level == 1:
  # Single dot: the import is relative to 'package' itself.
  return package + name
else:
  parts = package.split('.')[:-(level - 1)]
  if not parts:
    return None  # The relative import escapes past the top-level package.
  parts.append(name[level:])
  return '.'.join(parts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ImportModuleHookBySuffix(name, package=None): """Callback when a module is imported through importlib.import_module."""
_IncrementNestLevel()

try:
  # Really import modules.
  module = _real_import_module(name, package)
finally:
  # Normalize a relative name to absolute before callback matching; the
  # callbacks must always run, even if the import itself raised (partially
  # imported submodules may still need their hooks invoked).
  if name.startswith('.'):
    if package:
      name = _ResolveRelativeImport(name, package)
    else:
      # Should not happen. Relative imports require the package argument.
      name = None
  if name:
    _ProcessImportBySuffix(name, None, None)
return module
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _GenerateNames(name, fromlist, globals): """Generates the names of modules that might be loaded via this import. Args: name: Argument as passed to the importer. fromlist: Argument as passed to the importer. globals: Argument as passed to the importer. Returns: A set that contains the names of all modules that are loaded by the currently executing import statement, as they would show up in sys.modules. The returned set may contain module names that were already loaded before the execution of this import statement. The returned set may contain names that are not real modules. """
def GetCurrentPackage(globals):
  """Finds the name of the package for the currently executing module."""
  if not globals:
    return None

  # Get the name of the module/package that the current import is being
  # executed in.
  current = globals.get('__name__')
  if not current:
    return None

  # Check if the current module is really a module, or a package.
  current_file = globals.get('__file__')
  if not current_file:
    return None

  root = os.path.splitext(os.path.basename(current_file))[0]
  if root == '__init__':
    # The current import happened from a package. Return the package.
    return current
  else:
    # The current import happened from a module. Return the package that
    # contains the module.
    return current.rpartition('.')[0]

# A Python module can be addressed in two ways:
# 1. Using a path relative to the currently executing module's path. For
#    instance, module p1/p2/m3.py imports p1/p2/p3/m4.py using 'import p3.m4'.
# 2. Using a path relative to sys.path. For instance, module p1/p2/m3.py
#    imports p1/p2/p3/m4.py using 'import p1.p2.p3.m4'.
#
# The Python importer uses the 'globals' argument to identify the module that
# the current import is being performed in. The actual logic is very
# complicated, and we only approximate it here to limit the performance
# overhead (See import.c in the interpreter for details). Here, we only use
# the value of the globals['__name__'] for this purpose.
#
# Note: The Python importer prioritizes the current package over sys.path. For
# instance, if 'p1.p2.m3' imports 'm4', then 'p1.p2.m4' is a better match than
# the top level 'm4'. However, the debugger does not have to implement this,
# because breakpoint paths are not described relative to some other file. They
# are always assumed to be relative to the sys.path directories. If the user
# sets breakpoint inside 'm4.py', then we can map it to either the top level
# 'm4' or 'p1.p2.m4', i.e., both are valid matches.
curpkg = GetCurrentPackage(globals)

names = set()
# A Python module can be imported using two syntaxes:
# 1. import p1.p2.m3
# 2. from p1.p2 import m3
#
# When the regular 'import p1.p2.m3' syntax is used, the name of the module
# being imported is passed in the 'name' argument (e.g., name='p1.p2.m3',
# fromlist=None).
#
# When the from-import syntax is used, then fromlist contains the leaf names
# of the modules, and name contains the containing package. For instance, if
# name='a.b', fromlist=['c', 'd'], then we add ['a.b.c', 'a.b.d'].
#
# Corner cases:
# 1. The fromlist syntax can be used to import a function from a module.
#    For instance, 'from p1.p2.m3 import func'.
# 2. Sometimes, the importer is passed a dummy fromlist=['__doc__'] (see
#    import.c in the interpreter for details).
# Due to these corner cases, the returned set may contain entries that are not
# names of real modules.
for from_entry in fromlist or []:
  # Name relative to sys.path.
  # For relative imports such as 'from . import x', name will be the empty
  # string. Thus we should not prepend a '.' to the entry.
  entry = (name + '.' + from_entry) if name else from_entry
  names.add(entry)

  # Name relative to the currently executing module's package.
  if curpkg:
    names.add(curpkg + '.' + entry)

# Generate all names from name. For instance, if name='a.b.c', then
# we need to add ['a.b.c', 'a.b', 'a'].
while name:
  # Name relative to sys.path.
  names.add(name)

  # Name relative to currently executing module's package.
  if curpkg:
    names.add(curpkg + '.' + name)
  name = name.rpartition('.')[0]

return names
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _InvokeImportCallbackBySuffix(names): """Invokes import callbacks for newly loaded modules. Uses a path suffix match to identify whether a loaded module matches the file path provided by the user. Args: names: A set of names for modules that are loaded by the current import. The set may contain some superfluous entries that were already loaded before this import, or some entries that do not correspond to a module. The list is expected to be much smaller than the exact sys.modules so that a linear search is not as costly. """
def GetModuleFromName(name, path):
  """Returns the loaded module for this name/path, or None if not found.

  Args:
    name: A string that may represent the name of a loaded Python module.
    path: If 'name' ends with '.*', then the last path component in 'path'
      is used to identify what the wildcard may map to. Does not contain
      file extension.

  Returns:
    The loaded module for the given name and path, or None if a loaded
    module was not found.
  """
  # The from-import syntax can be used as 'from p1.p2 import *'. In this case,
  # we cannot know what modules will match the wildcard. However, we know that
  # the wildcard can only be used to import leaf modules. So, we guess that
  # the leaf module will have the same name as the leaf file name the user
  # provided. For instance,
  #   User input path = 'foo.py'
  #   Currently executing import:
  #     from pkg1.pkg2 import *
  # Then, we combine:
  #   1. 'pkg1.pkg2' from import's outer package and
  #   2. Add 'foo' as our guess for the leaf module name.
  # So, we will search for modules with name similar to 'pkg1.pkg2.foo'.
  if name.endswith('.*'):
    # Replace the final '*' with the name of the module we are looking for.
    name = name.rpartition('.')[0] + '.' + path.split('/')[-1]

  # Check if the module was loaded.
  return sys.modules.get(name)

# _import_callbacks might change during iteration because RemoveCallback()
# might delete items. Iterate over a copy to avoid a
# 'dictionary changed size during iteration' error.
for path, callbacks in list(_import_callbacks.items()):
  root = os.path.splitext(path)[0]
  nonempty_names = (n for n in names if n)
  modules = (GetModuleFromName(name, root) for name in nonempty_names)
  nonempty_modules = (m for m in modules if m)
  for module in nonempty_modules:
    # TODO(emrekultursay): Write unit test to cover None case.
    mod_file = getattr(module, '__file__', None)
    if not mod_file:
      continue
    mod_root = os.path.splitext(mod_file)[0]

    # If the module is relative, add the curdir prefix to convert it to
    # absolute path. Note that we don't use os.path.abspath because it
    # also normalizes the path (which has side effects we don't want).
    # BUG FIX: the 'Note that ...' comment continuation above had lost its
    # leading '#', which made this block a syntax error.
    if not os.path.isabs(mod_root):
      mod_root = os.path.join(os.curdir, mod_root)

    if module_utils2.IsPathSuffix(mod_root, root):
      # One matching module per registered path is enough; invoke callbacks
      # on a copy since a callback may unregister itself.
      for callback in callbacks.copy():
        callback(module)
      break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def init(driverName=None, debug=False):
    '''
    Constructs a new TTS engine instance or reuses the existing instance
    for the driver name. Engines are cached per driver name, so repeated
    calls with the same name return the same instance.

    @param driverName: Name of the platform specific driver to use. If
        None, selects the default driver for the operating system.
    @type: str
    @param debug: Debugging output enabled or not
    @type debug: bool
    @return: Engine instance
    @rtype: L{engine.Engine}
    '''
    engine = _activeEngines.get(driverName)
    if engine is None:
        engine = Engine(driverName, debug)
        _activeEngines[driverName] = engine
    return engine
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def startLoop(self):
    '''
    Starts a blocking run loop in which driver callbacks are properly
    invoked.

    @precondition: There was no previous successful call to L{startLoop}
        without an intervening call to L{stopLoop}.
    '''
    first = True
    self._looping = True
    # Poll until an external call clears self._looping. On the first pass,
    # mark the proxy idle so that queued commands begin pumping.
    while self._looping:
        if first:
            self._proxy.setBusy(False)
            first = False
        time.sleep(0.5)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _notify(self, topic, **kwargs): """ Invokes callbacks for an event topic. @param topic: String event name @type topic: str @param kwargs: Values associated with the event @type kwargs: dict """
# Invoke every callback registered for this topic; a callback that raises
# must not prevent the remaining callbacks from running.
for cb in self._connects.get(topic, []):
    try:
        cb(**kwargs)
    except Exception:
        if self._debug:
            traceback.print_exc()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disconnect(self, token): """ Unregisters a callback for an event topic. @param token: Token of the callback to unregister @type token: dict """
topic = token['topic']
try:
    arr = self._connects[topic]
except KeyError:
    return  # Topic has no registered listeners; nothing to unregister.
arr.remove(token['cb'])
if len(arr) == 0:
    # Drop the topic entry entirely once its last callback is removed.
    del self._connects[topic]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def save_to_file(self, text, filename, name=None):
    '''
    Queues a request to synthesize an utterance into an audio file.

    @param text: Text to speak
    @type text: unicode
    @param filename: Name of the file to save the audio to
    @param name: Name to associate with this utterance. Included in
        notifications about this utterance.
    @type name: str
    '''
    self.proxy.save_to_file(text, filename, name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def runAndWait(self): """ Runs an event loop until all commands queued up until this method call complete. Blocks during the event loop and returns when the queue is cleared. @raise RuntimeError: When the loop is already running """
if self._inLoop:
    raise RuntimeError('run loop already started')
self._inLoop = True
self._driverLoop = True
# Blocks until the driver has drained the queued commands and exited.
self.proxy.runAndWait()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def startLoop(self, useDriverLoop=True): """ Starts an event loop to process queued commands and callbacks. @param useDriverLoop: If True, uses the run loop provided by the driver (the default). If False, assumes the caller will enter its own run loop which will pump any events for the TTS engine properly. @type useDriverLoop: bool @raise RuntimeError: When the loop is already running """
if self._inLoop:
    raise RuntimeError('run loop already started')
self._inLoop = True
# Remember which loop mode was requested so endLoop/iterate can validate.
self._driverLoop = useDriverLoop
self.proxy.startLoop(self._driverLoop)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def endLoop(self): """ Stops a running event loop. @raise RuntimeError: When the loop is not running """
if not self._inLoop:
    raise RuntimeError('run loop not started')
# Tell the proxy which loop mode is ending before clearing local state.
self.proxy.endLoop(self._driverLoop)
self._inLoop = False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iterate(self): """ Must be called regularly when using an external event loop. """
if not self._inLoop:
    raise RuntimeError('run loop not started')
elif self._driverLoop:
    # iterate() is only valid with an external (caller-owned) event loop.
    raise RuntimeError('iterate not valid in driver run loop')
self.proxy.iterate()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _push(self, mtd, args, name=None):
    '''
    Adds a command to the queue.

    @param mtd: Method to invoke to process the command
    @type mtd: method
    @param args: Arguments to apply when invoking the method
    @type args: tuple
    @param name: Name associated with the command
    @type name: str
    '''
    # Queue the command, then try to process it immediately if idle.
    self._queue.append((mtd, args, name))
    self._pump()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _pump(self):
    '''
    Attempts to process the next command in the queue if one exists and the
    driver is not currently busy.
    '''
    # Drain queued commands until the driver reports busy or the queue empties.
    while (not self._busy) and len(self._queue):
        cmd = self._queue.pop(0)
        self._name = cmd[2]
        try:
            cmd[0](*cmd[1])
        except Exception as e:
            # Surface the failure as an 'error' notification; optionally
            # dump the traceback when debugging.
            self.notify('error', exception=e)
            if self._debug:
                traceback.print_exc()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def notify(self, topic, **kwargs):
    '''
    Sends a notification to the engine from the driver.

    @param topic: Notification topic
    @type topic: str
    @param kwargs: Arbitrary keyword arguments
    @type kwargs: dict
    '''
    # Tag the notification with the name of the command being processed.
    kwargs['name'] = self._name
    self._engine._notify(topic, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def setBusy(self, busy):
    '''
    Called by the driver to indicate it is busy.

    @param busy: True when busy, false when idle
    @type busy: bool
    '''
    self._busy = busy
    # When the driver becomes idle, resume processing queued commands.
    if not self._busy:
        self._pump()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def stop(self):
    '''
    Called by the engine to stop the current utterance and clear the queue
    of commands up to (but not including) the first end-loop command.
    '''
    # Discard pending commands until the queue is empty or an endLoop
    # command is reached; that command must survive so a pending
    # runAndWait still terminates its loop.
    while self._queue:
        mtd, args, name = self._queue[0]
        if mtd == self._engine.endLoop:
            break
        self._queue.pop(0)
    self._driver.stop()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def setProperty(self, name, value):
    '''
    Called by the engine to set a driver property value.

    @param name: Name of the property
    @type name: str
    @param value: Property value
    @type value: object
    '''
    # Property changes go through the queue so they apply in FIFO order.
    self._push(self._driver.setProperty, (name, value))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def runAndWait(self):
    '''
    Called by the engine to start an event loop, process all commands in
    the queue at the start of the loop, and then exit the loop.
    '''
    # Queue an endLoop command so the loop exits once everything queued
    # before this call has been processed.
    self._push(self._engine.endLoop, tuple())
    self._driver.startLoop()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def startLoop(self, useDriverLoop):
    '''
    Called by the engine to start an event loop.
    '''
    if useDriverLoop:
        # Block inside the driver's own run loop.
        self._driver.startLoop()
    else:
        # External loop: keep an iterator the caller pumps via iterate().
        self._iterator = self._driver.iterate()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def endLoop(self, useDriverLoop):
    '''
    Called by the engine to stop an event loop.
    '''
    # Drop any unprocessed commands and halt current output.
    self._queue = []
    self._driver.stop()
    if useDriverLoop:
        self._driver.endLoop()
    else:
        self._iterator = None
    # Mark busy so no further commands are pumped until the driver
    # signals idle again.
    self.setBusy(True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def post_slack(): """Post slack message."""
try:
    # Token comes from the environment; raises KeyError when unset.
    token = os.environ['SLACK_TOKEN']
    slack = Slacker(token)
    obj = slack.chat.post_message('#general', 'Hello fellow slackers!')
    # Echo the API result: success flag, channel id and message timestamp.
    print(obj.successful, obj.__dict__['body']['channel'], obj.__dict__[
        'body']['ts'])
except KeyError as ex:
    print('Environment variable %s not set.' % str(ex))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_slack(): """List channels & users in slack."""
try:
    # Token comes from the environment; raises KeyError when unset.
    token = os.environ['SLACK_TOKEN']
    slack = Slacker(token)

    # Get channel list
    response = slack.channels.list()
    channels = response.body['channels']
    for channel in channels:
        print(channel['id'], channel['name'])
        # if not channel['is_archived']:
        #     slack.channels.join(channel['name'])
    print()

    # Get users list
    response = slack.users.list()
    users = response.body['members']
    for user in users:
        # Skip deactivated accounts.
        if not user['deleted']:
            print(user['id'], user['name'], user['is_admin'], user[
                'is_owner'])
    print()
except KeyError as ex:
    print('Environment variable %s not set.' % str(ex))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Starts the main loop that is necessary to receive Bluetooth events from the Bluetooth adapter. This call blocks until you call `stop()` to stop the main loop. """
# Already running; calling run() twice is a no-op.
if self._main_loop:
    return

self._interface_added_signal = self._bus.add_signal_receiver(
    self._interfaces_added,
    dbus_interface='org.freedesktop.DBus.ObjectManager',
    signal_name='InterfacesAdded')

# TODO: Also listen to 'interfaces removed' events?

self._properties_changed_signal = self._bus.add_signal_receiver(
    self._properties_changed,
    dbus_interface=dbus.PROPERTIES_IFACE,
    signal_name='PropertiesChanged',
    arg0='org.bluez.Device1',
    path_keyword='path')

def disconnect_signals():
    # Invalidate all tracked devices and detach the D-Bus signal handlers.
    for device in self._devices.values():
        device.invalidate()
    self._properties_changed_signal.remove()
    self._interface_added_signal.remove()

self._main_loop = GObject.MainLoop()
try:
    self._main_loop.run()
    disconnect_signals()
except Exception:
    # Ensure signals are detached even when the loop raises.
    disconnect_signals()
    raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_discovery(self, service_uuids=[]): """Starts a discovery for BLE devices with given service UUIDs. :param service_uuids: Filters the search to only return devices with given UUIDs. """
# Restrict discovery to BLE (low energy) transports.
discovery_filter = {'Transport': 'le'}
if service_uuids:  # D-Bus doesn't like empty lists, it needs to guess the type
    discovery_filter['UUIDs'] = service_uuids

try:
    self._adapter.SetDiscoveryFilter(discovery_filter)
    self._adapter.StartDiscovery()
except dbus.exceptions.DBusException as e:
    # Map BlueZ error names to friendlier exceptions.
    if e.get_dbus_name() == 'org.bluez.Error.NotReady':
        raise errors.NotReady(
            "Bluetooth adapter not ready. "
            "Set `is_adapter_powered` to `True` or run 'echo \"power on\" | sudo bluetoothctl'.")
    if e.get_dbus_name() == 'org.bluez.Error.InProgress':
        # Discovery was already started - ignore exception
        pass
    else:
        raise _error_from_dbus_error(e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_discovery(self): """ Stops the discovery started with `start_discovery` """
try:
    self._adapter.StopDiscovery()
except dbus.exceptions.DBusException as e:
    # 'No discovery started' just means there is nothing to stop.
    if (e.get_dbus_name() == 'org.bluez.Error.Failed') and (e.get_dbus_message() == 'No discovery started'):
        pass
    else:
        raise _error_from_dbus_error(e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def properties_changed(self, sender, changed_properties, invalidated_properties): """ Called when a device property has changed or got invalidated. """
# Connection state transitions map to the connect/disconnect callbacks.
if 'Connected' in changed_properties:
    if changed_properties['Connected']:
        self.connect_succeeded()
    else:
        self.disconnect_succeeded()

# Fire services_resolved() only the first time resolution completes
# (self.services is still empty).
if ('ServicesResolved' in changed_properties and
        changed_properties['ServicesResolved'] == 1 and
        not self.services):
    self.services_resolved()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def services_resolved(self): """ Called when all device's services and characteristics got resolved. """
# Drop signal subscriptions for any previously discovered services.
self._disconnect_service_signals()

# Service object paths look like <device_path>/serviceXXXX (hex suffix).
services_regex = re.compile(self._device_path + '/service[0-9abcdef]{4}$')
managed_services = [
    service for service in self._object_manager.GetManagedObjects().items()
    if services_regex.match(service[0])]
self.services = [Service(
    device=self,
    path=service[0],
    uuid=service[1]['org.bluez.GattService1']['UUID']) for service in managed_services]

self._connect_service_signals()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def characteristics_resolved(self): """ Called when all service's characteristics got resolved. """
# Drop signal subscriptions for any previously discovered characteristics.
self._disconnect_characteristic_signals()

# Characteristic object paths look like <service_path>/charXXXX (hex suffix).
characteristics_regex = re.compile(self._path + '/char[0-9abcdef]{4}$')
managed_characteristics = [
    char for char in self._object_manager.GetManagedObjects().items()
    if characteristics_regex.match(char[0])]
self.characteristics = [Characteristic(
    service=self,
    path=c[0],
    uuid=c[1]['org.bluez.GattCharacteristic1']['UUID']) for c in managed_characteristics]

self._connect_characteristic_signals()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_value(self, offset=0): """ Reads the value of this descriptor. When successful, the value will be returned, otherwise `descriptor_read_value_failed()` of the related device is invoked. """
try:
    val = self._object.ReadValue(
        {'offset': dbus.UInt16(offset, variant_level=1)},
        dbus_interface='org.bluez.GattDescriptor1')
    return val
except dbus.exceptions.DBusException as e:
    # Report the failure to the device; implicitly returns None.
    error = _error_from_dbus_error(e)
    self.service.device.descriptor_read_value_failed(self, error=error)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def properties_changed(self, properties, changed_properties, invalidated_properties): """ Called when a Characteristic property has changed. """ value = changed_properties.get('Value')
# Only react when the 'Value' property actually changed.
if value is not None:
    self.service.device.characteristic_value_updated(characteristic=self, value=bytes(value))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_value(self, value, offset=0): """ Attempts to write a value to the characteristic. Success or failure will be notified by calls to `write_value_succeeded` or `write_value_failed` respectively. :param value: array of bytes to be written :param offset: offset from where to start writing the bytes (defaults to 0) """
# Convert the payload to D-Bus bytes; named `payload` so the builtin
# `bytes` is not shadowed.
payload = [dbus.Byte(b) for b in value]
try:
    self._object.WriteValue(
        payload,
        {'offset': dbus.UInt16(offset, variant_level=1)},
        reply_handler=self._write_value_succeeded,
        error_handler=self._write_value_failed,
        dbus_interface='org.bluez.GattCharacteristic1')
except dbus.exceptions.DBusException as e:
    # Bug fix: _write_value_failed(self, dbus_error) takes the error as its
    # only positional argument. The previous call passed `self` as the error
    # plus an unexpected `error=` keyword, which raised TypeError instead of
    # reporting the write failure.
    self._write_value_failed(e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _write_value_failed(self, dbus_error): """ Called when the write request has failed. """
# Translate the D-Bus error and notify the owning device.
error = _error_from_dbus_error(dbus_error)
self.service.device.characteristic_write_value_failed(characteristic=self, error=error)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enable_notifications(self, enabled=True): """ Enables or disables value change notifications. Success or failure will be notified by calls to `characteristic_enable_notifications_succeeded` or `enable_notifications_failed` respectively. Each time when the device notifies a new value, `characteristic_value_updated()` of the related device will be called. """
try:
    # StartNotify subscribes to value-change notifications; StopNotify
    # cancels the subscription. Both report back asynchronously.
    if enabled:
        self._object.StartNotify(
            reply_handler=self._enable_notifications_succeeded,
            error_handler=self._enable_notifications_failed,
            dbus_interface='org.bluez.GattCharacteristic1')
    else:
        self._object.StopNotify(
            reply_handler=self._enable_notifications_succeeded,
            error_handler=self._enable_notifications_failed,
            dbus_interface='org.bluez.GattCharacteristic1')
except dbus.exceptions.DBusException as e:
    # Bug fix: _enable_notifications_failed(self, dbus_error) takes the
    # error positionally; the previous `error=e` keyword raised TypeError
    # instead of reporting the failure.
    self._enable_notifications_failed(e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _enable_notifications_failed(self, dbus_error): """ Called when notification enabling has failed. """
if ((dbus_error.get_dbus_name() == 'org.bluez.Error.Failed') and
        ((dbus_error.get_dbus_message() == "Already notifying") or
         (dbus_error.get_dbus_message() == "No notify session started"))):
    # Ignore cases where notifications were already enabled or already disabled
    return
# Anything else is a genuine failure: translate and notify the device.
error = _error_from_dbus_error(dbus_error)
self.service.device.characteristic_enable_notifications_failed(characteristic=self, error=error)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _split(string, splitters): """Splits a string into parts at multiple characters"""
part = '' for character in string: if character in splitters: yield part part = '' else: part += character yield part
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _hash(number, alphabet): """Hashes `number` using the given `alphabet` sequence."""
hashed = '' len_alphabet = len(alphabet) while True: hashed = alphabet[number % len_alphabet] + hashed number //= len_alphabet if not number: return hashed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _unhash(hashed, alphabet): """Restores a number tuple from hashed using the given `alphabet` index."""
number = 0 len_alphabet = len(alphabet) for character in hashed: position = alphabet.index(character) number *= len_alphabet number += position return number
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _reorder(string, salt): """Reorders `string` according to `salt`."""
len_salt = len(salt)

# An empty salt leaves the string unchanged.
if len_salt != 0:
    string = list(string)
    index, integer_sum = 0, 0
    # Salt-driven shuffle: walk the string from the end, swapping each
    # position with one derived from the salt characters' ordinals.
    for i in range(len(string) - 1, 0, -1):
        integer = ord(salt[index])
        integer_sum += integer
        j = (integer + index + integer_sum) % i
        string[i], string[j] = string[j], string[i]
        index = (index + 1) % len_salt
    string = ''.join(string)

return string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ensure_length(encoded, min_length, alphabet, guards, values_hash): """Ensures the minimal hash length"""
len_guards = len(guards)

# Prepend a guard character chosen from the hash value and the first
# encoded character.
guard_index = (values_hash + ord(encoded[0])) % len_guards
encoded = guards[guard_index] + encoded

# Append a second guard if still too short.
if len(encoded) < min_length:
    guard_index = (values_hash + ord(encoded[2])) % len_guards
    encoded += guards[guard_index]

# Pad by wrapping with halves of a progressively reshuffled alphabet,
# trimming symmetrically when the padding overshoots the target length.
split_at = len(alphabet) // 2
while len(encoded) < min_length:
    alphabet = _reorder(alphabet, alphabet)
    encoded = alphabet[split_at:] + encoded + alphabet[:split_at]
    excess = len(encoded) - min_length
    if excess > 0:
        from_index = excess // 2
        encoded = encoded[from_index:from_index+min_length]

return encoded
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _encode(values, salt, min_length, alphabet, separators, guards): """Helper function that does the hash building without argument checks."""
len_alphabet = len(alphabet)
len_separators = len(separators)
# Per-call hash of the values; selects the lottery character (and, in
# _ensure_length, the guards).
values_hash = sum(x % (i + 100) for i, x in enumerate(values))
encoded = lottery = alphabet[values_hash % len(alphabet)]

for i, value in enumerate(values):
    # Shuffle the alphabet with a salt derived from the lottery char, the
    # user salt and the previous alphabet, then hash this value with it.
    alphabet_salt = (lottery + salt + alphabet)[:len_alphabet]
    alphabet = _reorder(alphabet, alphabet_salt)
    last = _hash(value, alphabet)
    encoded += last
    # Pick a separator from the value and the first hashed character.
    value %= ord(last[0]) + i
    encoded += separators[value % len_separators]

encoded = encoded[:-1]  # cut off last separator

return (encoded if len(encoded) >= min_length else
        _ensure_length(encoded, min_length, alphabet, guards, values_hash))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _decode(hashid, salt, alphabet, separators, guards): """Helper method that restores the values encoded in a hashid without argument checks."""
parts = tuple(_split(hashid, guards))
# Guards split the hashid into up to three parts; the payload is the
# middle part when guards are present, otherwise the first part.
hashid = parts[1] if 2 <= len(parts) <= 3 else parts[0]

if not hashid:
    return

# The first character is the lottery char used to seed alphabet shuffling.
lottery_char = hashid[0]
hashid = hashid[1:]

hash_parts = _split(hashid, separators)
for part in hash_parts:
    # Re-run the same shuffle sequence as _encode so each part is
    # unhashed against the matching alphabet.
    alphabet_salt = (lottery_char + salt + alphabet)[:len(alphabet)]
    alphabet = _reorder(alphabet, alphabet_salt)
    yield _unhash(part, alphabet)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _deprecated(func): """A decorator that warns about deprecation when the passed-in function is invoked."""
# Wrap the function so every call first emits a DeprecationWarning;
# @wraps preserves the original name/docstring for introspection.
@wraps(func)
def with_warning(*args, **kwargs):
    warnings.warn(
        ('The %s method is deprecated and will be removed in v2.*.*' %
         func.__name__),
        DeprecationWarning
    )
    return func(*args, **kwargs)
return with_warning
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encode(self, *values): """Builds a hash from the passed `values`. :param values The values to transform into a hashid '1d6216i30h53elk3' """
# All values must be non-negative integers; otherwise return an empty
# string rather than raising.
if not (values and all(_is_uint(x) for x in values)):
    return ''
return _encode(values, self._salt, self._min_length, self._alphabet,
               self._separators, self._guards)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode(self, hashid): """Restore a tuple of numbers from the passed `hashid`. :param hashid The hashid to decode (1, 23, 456) """
if not hashid or not _is_str(hashid):
    return ()
try:
    numbers = tuple(_decode(hashid, self._salt, self._alphabet,
                            self._separators, self._guards))
    # Round-trip check guards against hashids produced with a different
    # salt/alphabet: only return numbers that re-encode to the input.
    return numbers if hashid == self.encode(*numbers) else ()
except ValueError:
    # _unhash raises ValueError for characters outside the alphabet.
    return ()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_suppressions(relative_filepaths, root, messages): """ Given every message which was emitted by the tools, and the list of files to inspect, create a list of files to ignore, and a map of filepath -> line-number -> codes to ignore """
paths_to_ignore = set()
lines_to_ignore = defaultdict(set)
messages_to_ignore = defaultdict(lambda: defaultdict(set))

# first deal with 'noqa' style messages
for filepath in relative_filepaths:
    abspath = os.path.join(root, filepath)
    try:
        file_contents = encoding.read_py_file(abspath).split('\n')
    except encoding.CouldNotHandleEncoding as err:
        # TODO: this output will break output formats such as JSON
        warnings.warn('{0}: {1}'.format(err.path, err.cause), ImportWarning)
        continue

    ignore_file, ignore_lines = get_noqa_suppressions(file_contents)
    if ignore_file:
        paths_to_ignore.add(filepath)
    lines_to_ignore[filepath] |= ignore_lines

# now figure out which messages were suppressed by pylint
pylint_ignore_files, pylint_ignore_messages = _parse_pylint_informational(messages)
paths_to_ignore |= pylint_ignore_files
for filepath, line in pylint_ignore_messages.items():
    for line_number, codes in line.items():
        for code in codes:
            messages_to_ignore[filepath][line_number].add(('pylint', code))
            # Also suppress messages from other tools that mean the same
            # thing as this pylint code.
            if code in _PYLINT_EQUIVALENTS:
                for equivalent in _PYLINT_EQUIVALENTS[code]:
                    messages_to_ignore[filepath][line_number].add(equivalent)

return paths_to_ignore, lines_to_ignore, messages_to_ignore
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_parser(): """ This is a helper method to return an argparse parser, to be used with the Sphinx argparse plugin for documentation. """
# Build the settings manager and a command-line source, then derive the
# argparse parser from them.
manager = cfg.build_manager()
source = cfg.build_command_line_source(prog='prospector', description=None)
return source.build_parser(manager.settings, None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _combine_w0614(self, messages): """ For the "unused import from wildcard import" messages, we want to combine all warnings about the same line into a single message. """
by_loc = defaultdict(list)
out = []

# Partition: wildcard-import warnings grouped by location; everything
# else is passed through unchanged.
for message in messages:
    if message.code == 'unused-wildcard-import':
        by_loc[message.location].append(message)
    else:
        out.append(message)

for location, message_list in by_loc.items():
    # Extract the imported-but-unused names from each message text and
    # combine them into a single message per location.
    names = []
    for msg in message_list:
        names.append(
            _UNUSED_WILDCARD_IMPORT_RE.match(msg.message).group(1))

    msgtxt = 'Unused imports from wildcard import: %s' % ', '.join(
        names)
    combined_message = Message('pylint', 'unused-wildcard-import',
                               location, msgtxt)
    out.append(combined_message)

return out