docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
计算输入流的etag: etag规格参考 https://developer.qiniu.com/kodo/manual/1231/appendix#3 Args: input_stream: 待计算etag的二进制流 Returns: 输入流的etag值
def etag_stream(input_stream):
    """Compute the qiniu etag of a binary stream.

    Spec: https://developer.qiniu.com/kodo/manual/1231/appendix#3

    Args:
        input_stream: binary stream to digest.

    Returns:
        The etag string of the input stream.
    """
    block_hashes = [_sha1(chunk)
                    for chunk in _file_iter(input_stream, _BLOCK_SIZE)]
    if not block_hashes:
        # Empty stream: etag of a single empty block.
        block_hashes = [_sha1(b'')]
    if len(block_hashes) > 1:
        # Multi-block: hash the concatenated per-block hashes.
        prefix = b'\x96'
        digest = _sha1(b('').join(block_hashes))
    else:
        prefix = b'\x16'
        digest = block_hashes[0]
    return urlsafe_base64_encode(prefix + digest)
325,808
计算七牛API中的数据格式: entry规格参考 https://developer.qiniu.com/kodo/api/1276/data-format Args: bucket: 待操作的空间名 key: 待操作的文件名 Returns: 符合七牛API规格的数据格式
def entry(bucket, key):
    """Build the qiniu API entry string for a bucket/key pair.

    Spec: https://developer.qiniu.com/kodo/api/1276/data-format

    Args:
        bucket: bucket name to operate on.
        key: file name to operate on; may be None.

    Returns:
        URL-safe base64 of "<bucket>" (key is None) or "<bucket>:<key>".
    """
    if key is None:
        raw = '{0}'.format(bucket)
    else:
        raw = '{0}:{1}'.format(bucket, key)
    return urlsafe_base64_encode(raw)
325,809
将时间戳转换为HTTP RFC格式 Args: timestamp: 整型Unix时间戳(单位秒)
def rfc_from_timestamp(timestamp):
    """Convert a Unix timestamp to an HTTP RFC 1123 date string.

    Args:
        timestamp: integer Unix timestamp in seconds.

    Returns:
        Date string such as 'Thu, 01 Jan 1970 00:00:00 GMT'.
    """
    # datetime.utcfromtimestamp() is deprecated since Python 3.12; build a
    # timezone-aware UTC datetime instead. The formatted output is identical.
    from datetime import datetime, timezone
    last_modified_date = datetime.fromtimestamp(timestamp, tz=timezone.utc)
    return last_modified_date.strftime('%a, %d %b %Y %H:%M:%S GMT')
325,810
创建时间戳防盗链 Args: host: 带访问协议的域名 file_name: 原始文件名,不需要urlencode query_string: 查询参数,不需要urlencode encrypt_key: 时间戳防盗链密钥 deadline: 链接有效期时间戳(以秒为单位) Returns: 带时间戳防盗链鉴权访问链接
def create_timestamp_anti_leech_url(host, file_name, query_string, encrypt_key, deadline):
    """Build a timestamp anti-leech (hotlink protection) URL.

    Args:
        host: domain including access protocol (e.g. 'http://example.com').
        file_name: raw file name, not urlencoded.
        query_string: query parameters, not urlencoded.
        encrypt_key: anti-leech secret key.
        deadline: expiry Unix timestamp in seconds.

    Returns:
        Signed access URL carrying 'sign' and 't' parameters.
    """
    encoded_name = urlencode(file_name)
    if query_string:
        url_to_sign = '{0}/{1}?{2}'.format(host, encoded_name, query_string)
    else:
        url_to_sign = '{0}/{1}'.format(host, encoded_name)

    # Expiry is embedded as lowercase hex without the '0x' prefix.
    expire_hex = str(hex(deadline))[2:]
    # Sign over key + '/<encoded name>' + hex expiry.
    to_sign = '{0}/{1}{2}'.format(encrypt_key, encoded_name, expire_hex).encode()
    signature = hashlib.md5(to_sign).hexdigest()

    separator = '&' if query_string else '?'
    return '{0}{1}sign={2}&t={3}'.format(url_to_sign, separator, signature, expire_hex)
325,828
刷新文件和目录,文档 http://developer.qiniu.com/article/fusion/api/refresh.html Args: urls: 待刷新的文件列表 dirs: 待刷新的目录列表 Returns: 一个dict变量和一个ResponseInfo对象 参考代码 examples/cdn_manager.py
def refresh_urls_and_dirs(self, urls, dirs):
    """Refresh CDN-cached files and directories.

    Doc: http://developer.qiniu.com/article/fusion/api/refresh.html

    Args:
        urls: list of file URLs to refresh.
        dirs: list of directories to refresh.

    Returns:
        A dict and a ResponseInfo object; see examples/cdn_manager.py.
    """
    req = {}
    if urls is not None and len(urls) > 0:
        req["urls"] = urls
    if dirs is not None and len(dirs) > 0:
        req["dirs"] = dirs
    url = '{0}/v2/tune/refresh'.format(self.server)
    return self.__post(url, json.dumps(req))
325,829
预取文件列表,文档 http://developer.qiniu.com/article/fusion/api/prefetch.html Args: urls: 待预取的文件外链列表 Returns: 一个dict变量和一个ResponseInfo对象 参考代码 examples/cdn_manager.py
def prefetch_urls(self, urls):
    """Prefetch a list of file URLs onto CDN edge nodes.

    Doc: http://developer.qiniu.com/article/fusion/api/prefetch.html

    Args:
        urls: list of external file URLs to prefetch.

    Returns:
        A dict and a ResponseInfo object; see examples/cdn_manager.py.
    """
    body = json.dumps({"urls": urls})
    url = '{0}/v2/tune/prefetch'.format(self.server)
    return self.__post(url, body)
325,830
查询带宽数据,文档 http://developer.qiniu.com/article/fusion/api/traffic-bandwidth.html Args: domains: 域名列表 start_date: 起始日期 end_date: 结束日期 granularity: 数据间隔 Returns: 一个dict变量和一个ResponseInfo对象 参考代码 examples/cdn_manager.py
def get_bandwidth_data(self, domains, start_date, end_date, granularity):
    """Query CDN bandwidth data.

    Doc: http://developer.qiniu.com/article/fusion/api/traffic-bandwidth.html

    Args:
        domains: list of domain names.
        start_date: start date.
        end_date: end date.
        granularity: data interval.

    Returns:
        A dict and a ResponseInfo object; see examples/cdn_manager.py.
    """
    req = {
        "domains": ';'.join(domains),  # API expects a ';'-joined string.
        "startDate": start_date,
        "endDate": end_date,
        "granularity": granularity,
    }
    url = '{0}/v2/tune/bandwidth'.format(self.server)
    return self.__post(url, json.dumps(req))
325,831
获取日志下载链接,文档 http://developer.qiniu.com/article/fusion/api/log.html Args: domains: 域名列表 log_date: 日志日期 Returns: 一个dict变量和一个ResponseInfo对象 参考代码 examples/cdn_manager.py
def get_log_list_data(self, domains, log_date):
    """Fetch CDN log download links.

    Doc: http://developer.qiniu.com/article/fusion/api/log.html

    Args:
        domains: list of domain names.
        log_date: log date.

    Returns:
        A dict and a ResponseInfo object; see examples/cdn_manager.py.
    """
    req = {
        "domains": ';'.join(domains),  # API expects a ';'-joined string.
        "day": log_date,
    }
    url = '{0}/v2/tune/log/list'.format(self.server)
    return self.__post(url, json.dumps(req))
325,832
创建域名,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name Args: name: 域名, 如果是泛域名,必须以点号 . 开头 body: 创建域名参数 Returns: {}
def create_domain(self, name, body):
    """Create a CDN domain.

    Doc: https://developer.qiniu.com/fusion/api/4246/the-domain-name

    Args:
        name: domain name; a wildcard domain must start with a dot '.'.
        body: domain creation parameters.

    Returns:
        {}
    """
    return self.__post('{0}/domain/{1}'.format(self.server, name), body)
325,834
获取域名信息,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name Args: name: 域名, 如果是泛域名,必须以点号 . 开头 Returns: 返回一个tuple对象,其格式为(<result>, <ResponseInfo>) - result 成功返回dict{},失败返回{"error": "<errMsg string>"} - ResponseInfo 请求的Response信息
def get_domain(self, name):
    """Fetch information about a CDN domain.

    Doc: https://developer.qiniu.com/fusion/api/4246/the-domain-name

    Args:
        name: domain name; a wildcard domain must start with a dot '.'.

    Returns:
        (result, ResponseInfo) tuple; result is a dict on success or
        {"error": "<errMsg string>"} on failure.
    """
    return self.__post('{0}/domain/{1}'.format(self.server, name))
325,835
修改证书,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name#11 Args: name: 域名 certid: 证书id,从上传或者获取证书列表里拿到证书id forceHttps: 是否强制https跳转 Returns: {}
def put_httpsconf(self, name, certid, forceHttps):
    """Update the HTTPS certificate configuration of a domain.

    Doc: https://developer.qiniu.com/fusion/api/4246/the-domain-name#11

    Args:
        name: domain name.
        certid: certificate id, obtained from upload or the certificate list.
        forceHttps: whether to force redirect to https.

    Returns:
        {}
    """
    payload = json.dumps({"certid": certid, "forceHttps": forceHttps})
    url = '{0}/domain/{1}/httpsconf'.format(self.server, name)
    return self.__put(url, payload)
325,836
上传证书,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name#11 Args: name: 证书名称 common_name: 相关域名 pri: 证书私钥 ca: 证书内容 Returns: 返回一个tuple对象,其格式为(<result>, <ResponseInfo>) - result 成功返回dict{certID: <CertID>},失败返回{"error": "<errMsg string>"} - ResponseInfo 请求的Response信息
def create_sslcert(self, name, common_name, pri, ca):
    """Upload an SSL certificate.

    Doc: https://developer.qiniu.com/fusion/api/4246/the-domain-name#11

    Args:
        name: certificate name.
        common_name: related domain name.
        pri: certificate private key.
        ca: certificate content.

    Returns:
        (result, ResponseInfo) tuple; result is {certID: <CertID>} on
        success or {"error": "<errMsg string>"} on failure.
    """
    payload = {
        "name": name,
        "common_name": common_name,
        "pri": pri,
        "ca": ca,
    }
    return self.__post('{0}/sslcert'.format(self.server), json.dumps(payload))
325,837
Screenshot with PNG format Args: png_filename(string): optional, save file name format(string): return format, pillow or raw(default) Returns: raw data or PIL.Image Raises: WDAError
def screenshot(self, png_filename=None, format='raw'):
    """Take a screenshot (PNG data from the WDA server).

    Args:
        png_filename (string): optional, path to save the PNG to.
        format (string): 'raw' (default) returns bytes, 'pillow' a PIL.Image.

    Returns:
        raw bytes or PIL.Image.

    Raises:
        WDAError: saving was requested but the data is not valid PNG.
        ValueError: unknown format.
    """
    raw_value = base64.b64decode(self.http.get('screenshot').value)
    if png_filename:
        # Refuse to write a file that lacks the PNG signature.
        if not raw_value.startswith(b"\x89PNG\r\n\x1a\n"):
            raise WDAError(-1, "screenshot png format error")
        with open(png_filename, 'wb') as f:
            f.write(raw_value)
    if format == 'raw':
        return raw_value
    if format == 'pillow':
        from PIL import Image
        return Image.open(io.BytesIO(raw_value))
    raise ValueError("unknown format")
326,038
Tap and hold for a moment Args: - x, y(int): position - duration(float): seconds of hold time [[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],
def tap_hold(self, x, y, duration=1.0):
    """Tap and hold at (x, y) for `duration` seconds.

    Args:
        x, y (int): position.
        duration (float): seconds of hold time.

    WDA route: POST /wda/touchAndHold (handleTouchAndHoldCoordinate:).
    """
    payload = {'x': x, 'y': y, 'duration': duration}
    return self.http.post('/wda/touchAndHold', data=payload)
326,046
Wait element and perform click Args: timeout (float): timeout for wait Returns: bool: if successfully clicked
def click_exists(self, timeout=0):
    """Wait for the element and click it if found.

    Args:
        timeout (float): timeout for wait.

    Returns:
        bool: True if the element was found and clicked, False otherwise.
    """
    element = self.get(timeout=timeout, raise_error=False)
    if element is None:
        return False
    element.click()
    return True
326,068
alias of get Args: timeout (float): timeout seconds raise_error (bool): default true, whether to raise error if element not found Raises: WDAElementNotFoundError
def wait(self, timeout=None, raise_error=True):
    """Alias of get.

    Args:
        timeout (float): timeout seconds.
        raise_error (bool): default True; whether to raise an error if the
            element is not found.

    Raises:
        WDAElementNotFoundError
    """
    return self.get(timeout=timeout, raise_error=raise_error)
326,069
Initialize debuggee labels from environment variables and flags. The caller passes all the flags that the debuglet got. This function will only use the flags used to label the debuggee. Flags take precedence over environment variables. Debuggee description is formatted from available flags. Args: flags: dictionary of debuglet command line flags.
def InitializeDebuggeeLabels(self, flags):
    """Initialize debuggee labels from environment variables and flags.

    The caller passes all the flags that the debuglet got. This function
    will only use the flags used to label the debuggee. Flags take
    precedence over environment variables.

    Args:
        flags: dictionary of debuglet command line flags.
    """
    self._debuggee_labels = {}

    for (label, var_names) in six.iteritems(_DEBUGGEE_LABELS):
        # var_names is a list of possible environment variables that may
        # contain the label value. Find the first one that is set.
        for name in var_names:
            value = os.environ.get(name)
            if value:
                # Special case for module. We omit the "default" module
                # to stay consistent with AppEngine.
                if label == labels.Debuggee.MODULE and value == 'default':
                    break
                self._debuggee_labels[label] = value
                break

    # Flags override whatever came from the environment.
    if flags:
        self._debuggee_labels.update(
            {name: value for (name, value) in six.iteritems(flags)
             if name in _DEBUGGEE_LABELS})

    self._debuggee_labels['projectid'] = self._project_id
326,261
Asynchronously updates the specified breakpoint on the backend. This function returns immediately. The worker thread is actually doing all the work. The worker thread is responsible to retry the transmission in case of transient errors. Args: breakpoint: breakpoint in either final or non-final state.
def EnqueueBreakpointUpdate(self, breakpoint):
    """Asynchronously updates the specified breakpoint on the backend.

    Returns immediately; the transmission worker thread does the actual
    sending and retries transient errors.

    Args:
        breakpoint: breakpoint in either final or non-final state.
    """
    with self._transmission_thread_startup_lock:
        if self._transmission_thread is None:
            # Lazily start the daemon worker thread on first enqueue.
            worker = threading.Thread(target=self._TransmissionThreadProc)
            worker.name = 'Cloud Debugger transmission thread'
            worker.daemon = True
            worker.start()
            self._transmission_thread = worker

    # (breakpoint, retry_count) pair; retries start at 0.
    self._transmission_queue.append((breakpoint, 0))
    self._new_updates.set()
326,265
Single attempt to register the debuggee. If the registration succeeds, sets self._debuggee_id to the registered debuggee ID. Args: service: client to use for API calls Returns: (registration_required, delay) tuple
def _RegisterDebuggee(self, service):
    """Single attempt to register the debuggee.

    If the registration succeeds, sets self._debuggee_id to the registered
    debuggee ID.

    Args:
        service: client to use for API calls.

    Returns:
        (registration_required, delay) tuple.
    """
    try:
        request = {'debuggee': self._GetDebuggee()}

        try:
            response = service.debuggees().register(body=request).execute()

            # self._project_number will refer to the project id on
            # initialization if the project number is not available. The
            # project field in the debuggee will always refer to the
            # project number. Update so the server will not have to do
            # id->number translations in the future.
            project_number = response['debuggee'].get('project')
            self._project_number = project_number or self._project_number

            self._debuggee_id = response['debuggee']['id']
            native.LogInfo('Debuggee registered successfully, ID: %s' % (
                self._debuggee_id))
            self.register_backoff.Succeeded()
            return (False, 0)  # Proceed immediately to list active breakpoints.
        except BaseException:
            # Registration call failed; fall through to backoff.
            native.LogInfo('Failed to register debuggee: %s, %s' % (
                request, traceback.format_exc()))
    except BaseException:
        # Could not even build the registration request.
        native.LogWarning('Debuggee information not available: ' +
                          traceback.format_exc())

    return (True, self.register_backoff.Failed())
326,269
Single attempt to query the list of active breakpoints. Must not be called before the debuggee has been registered. If the request fails, this function resets self._debuggee_id, which triggers repeated debuggee registration. Args: service: client to use for API calls Returns: (registration_required, delay) tuple
def _ListActiveBreakpoints(self, service):
    """Single attempt to query the list of active breakpoints.

    Must not be called before the debuggee has been registered. If the
    request fails, this function resets self._debuggee_id, which triggers
    repeated debuggee registration.

    Args:
        service: client to use for API calls.

    Returns:
        (registration_required, delay) tuple.
    """
    try:
        response = service.debuggees().breakpoints().list(
            debuggeeId=self._debuggee_id, waitToken=self._wait_token,
            successOnTimeout=True).execute()
        if not response.get('waitExpired'):
            self._wait_token = response.get('nextWaitToken')
            breakpoints = response.get('breakpoints') or []
            if self._breakpoints != breakpoints:
                self._breakpoints = breakpoints
                native.LogInfo(
                    'Breakpoints list changed, %d active, wait token: %s' % (
                        len(self._breakpoints), self._wait_token))
                self.on_active_breakpoints_changed(
                    copy.deepcopy(self._breakpoints))
    except BaseException:
        native.LogInfo('Failed to query active breakpoints: ' +
                       traceback.format_exc())

        # Forget debuggee ID to trigger repeated debuggee registration.
        # Once the registration succeeds, the worker thread will retry
        # this query.
        self._debuggee_id = None
        return (True, self.list_backoff.Failed())

    self.list_backoff.Succeeded()
    return (False, 0)
326,270
Computes debuggee uniquifier. The debuggee uniquifier has to be identical on all instances. Therefore the uniquifier should not include any random numbers and should only be based on inputs that are guaranteed to be the same on all instances. Args: debuggee: complete debuggee message without the uniquifier Returns: Hex string of SHA1 hash of project information, debuggee labels and debuglet version.
def _ComputeUniquifier(self, debuggee):
    """Computes debuggee uniquifier.

    The debuggee uniquifier has to be identical on all instances, so it
    must not include random numbers — only inputs guaranteed to be the
    same everywhere.

    Args:
        debuggee: complete debuggee message without the uniquifier.

    Returns:
        Hex string of SHA1 hash of project information, debuggee labels
        and debuglet version.
    """
    digest = hashlib.sha1()

    has_minor_version = 'minorversion' in debuggee.get('labels', [])
    has_source_context = 'sourceContexts' in debuggee
    if not has_minor_version and not has_source_context:
        # No source context: hash the application files instead, so
        # different deployments still get distinct uniquifiers.
        uniquifier_computer.ComputeApplicationUniquifier(digest)

    return digest.hexdigest()
326,274
Reads JSON file from an application directory. Args: relative_path: file name relative to application root directory. Returns: Parsed JSON data or None if the file does not exist, can't be read or not a valid JSON file.
def _ReadAppJsonFile(self, relative_path): try: with open(os.path.join(sys.path[0], relative_path), 'r') as f: return json.load(f) except (IOError, ValueError): return None
326,275
Removes any Python system path prefix from the given path. Python keeps almost all paths absolute. This is not what we actually want to return. This loops through system paths (directories in which Python will load modules). If "path" is relative to one of them, the directory prefix is removed. Args: path: absolute path to normalize (relative paths will not be altered) Returns: Relative path if "path" is within one of the sys.path directories or the input otherwise.
def NormalizePath(path):
    """Removes any Python system path prefix from the given path.

    Python keeps almost all paths absolute, which is not what we want to
    return. This loops through sys.path (directories Python loads modules
    from); if "path" is relative to one of them, that prefix is removed.

    Args:
        path: absolute path to normalize (relative paths are unaltered).

    Returns:
        Relative path if "path" is within one of the sys.path directories,
        the (normalized) input otherwise.
    """
    normalized = os.path.normpath(path)
    for candidate in sys.path:
        if not candidate:
            continue
        # os.path.join with '' guarantees a trailing separator.
        prefix = os.path.join(candidate, '')
        if normalized.startswith(prefix):
            return normalized[len(prefix):]
    return normalized
326,276
Determines the type of val, returning a "full path" string. For example: DetermineType(5) -> __builtin__.int DetermineType(Foo()) -> com.google.bar.Foo Args: value: Any value, the value is irrelevant as only the type metadata is checked Returns: Type path string. None if type cannot be determined.
def DetermineType(value):
    """Determines the type of value, returning a "full path" string.

    For example:
        DetermineType(5) -> builtins.int
        DetermineType(Foo()) -> com.google.bar.Foo

    Args:
        value: any value; only its type metadata is inspected.

    Returns:
        Type path string, or None if the type cannot be determined.
    """
    value_type = type(value)
    if not hasattr(value_type, '__name__'):
        return None

    module_prefix = getattr(value_type, '__module__', '')
    if module_prefix:
        module_prefix += '.'
    return module_prefix + value_type.__name__
326,277
Compiles and evaluates watched expression. Args: frame: evaluation context. expression: watched expression to compile and evaluate. Returns: (False, status) on error or (True, value) on success.
def _EvaluateExpression(frame, expression):
    """Compiles and evaluates a watched expression.

    Args:
        frame: evaluation context.
        expression: watched expression to compile and evaluate.

    Returns:
        (False, status) on error or (True, value) on success.
    """
    def _Error(refers_to, fmt, params):
        # Build the (False, status-dict) error result.
        return (False, {
            'isError': True,
            'refersTo': refers_to,
            'description': {'format': fmt, 'parameters': params}})

    try:
        code = compile(expression, '<watched_expression>', 'eval')
    except (TypeError, ValueError) as e:
        # Expression string contains null bytes or similar.
        return _Error('VARIABLE_NAME', 'Invalid expression', [str(e)])
    except SyntaxError as e:
        return _Error('VARIABLE_NAME',
                      'Expression could not be compiled: $0', [e.msg])

    try:
        return (True, native.CallImmutable(frame, code))
    except BaseException as e:  # pylint: disable=broad-except
        return _Error('VARIABLE_VALUE', 'Exception occurred: $0', [str(e)])
326,280
Gets the code object name for the frame. Args: frame: the frame to get the name from Returns: The function name if the code is a static function or the class name with the method name if it is an member function.
def _GetFrameCodeObjectName(frame): # This functions under the assumption that member functions will name their # first parameter argument 'self' but has some edge-cases. if frame.f_code.co_argcount >= 1 and 'self' == frame.f_code.co_varnames[0]: return (frame.f_locals['self'].__class__.__name__ + '.' + frame.f_code.co_name) else: return frame.f_code.co_name
326,281
Formats the message. Unescapes '$$' with '$'. Args: template: message template (e.g. 'a = $0, b = $1'). parameters: substitution parameters for the format. Returns: Formatted message with parameters embedded in template placeholders.
def _FormatMessage(template, parameters): def GetParameter(m): try: return parameters[int(m.group(0)[1:])] except IndexError: return INVALID_EXPRESSION_INDEX parts = template.split('$$') return '$'.join(re.sub(r'\$\d+', GetParameter, part) for part in parts)
326,282
Class constructor. Args: definition: breakpoint definition that this class will augment with captured data. data_visibility_policy: An object used to determine the visibiliy of a captured variable. May be None if no policy is available.
def __init__(self, definition, data_visibility_policy):
    """Class constructor.

    Args:
        definition: breakpoint definition that this class will augment
            with captured data.
        data_visibility_policy: an object used to determine the visibility
            of a captured variable. May be None if no policy is available.
    """
    self.data_visibility_policy = data_visibility_policy
    self.breakpoint = copy.deepcopy(definition)

    self.breakpoint['stackFrames'] = []
    self.breakpoint['evaluatedExpressions'] = []
    # Index 0 is a sentinel entry that trimmed variable references are
    # redirected to ("buffer full").
    self.breakpoint['variableTable'] = [{
        'status': {
            'isError': True,
            'refersTo': 'VARIABLE_VALUE',
            'description': {
                'format': 'Buffer full. Use an expression to see more data'
            }
        }
    }]

    # Shortcut to variables table in the breakpoint message.
    self._var_table = self.breakpoint['variableTable']

    # Maps object ID to its index in variables table.
    self._var_table_index = {}

    # Total size of data collected so far. Limited by max_size.
    self._total_size = 0

    # Maximum number of stack frame to capture. The limit is aimed to
    # reduce the overall collection time.
    self.max_frames = 20

    # Only collect locals and arguments on the few top frames. For the
    # rest of the frames we only collect the source location.
    self.max_expand_frames = 5

    # Maximum amount of data to capture. The application will usually
    # have a lot of objects and we need to stop somewhere to keep the
    # delay reasonable. This constant only counts the collected payload.
    # Overhead due to key names is not counted.
    self.max_size = 32768  # 32 KB

    self.default_capture_limits = _CaptureLimits()

    # When the user provides an expression, they've indicated that
    # they're interested in some specific data. Use higher per-object
    # capture limits for expressions. We don't want to globally increase
    # capture limits, because in the case where the user has not
    # indicated a preference, we don't want a single large object on the
    # stack to use the entire max_size quota and hide the rest of the
    # data.
    self.expression_capture_limits = _CaptureLimits(max_value_len=32768,
                                                    max_list_items=32768)
326,285
Collects call stack, local variables and objects. Starts collection from the specified frame. We don't start from the top frame to exclude the frames due to debugger. Updates the content of self.breakpoint. Args: top_frame: top frame to start data collection.
def Collect(self, top_frame):
    """Collects call stack, local variables and objects.

    Starts collection from the specified frame. We don't start from the
    top frame to exclude the frames due to debugger. Updates the content
    of self.breakpoint.

    Args:
        top_frame: top frame to start data collection.
    """
    # Evaluate call stack.
    frame = top_frame
    top_line = self.breakpoint['location']['line']
    breakpoint_frames = self.breakpoint['stackFrames']

    try:
        # Evaluate watched expressions.
        if 'expressions' in self.breakpoint:
            self.breakpoint['evaluatedExpressions'] = [
                self._CaptureExpression(top_frame, expression)
                for expression in self.breakpoint['expressions']]

        while frame and (len(breakpoint_frames) < self.max_frames):
            # The breakpoint's own line is used for the top frame;
            # f_lineno for the rest.
            line = top_line if frame == top_frame else frame.f_lineno
            code = frame.f_code

            if len(breakpoint_frames) < self.max_expand_frames:
                frame_arguments, frame_locals = self.CaptureFrameLocals(frame)
            else:
                # Beyond max_expand_frames only the source location is
                # collected.
                frame_arguments = []
                frame_locals = []

            breakpoint_frames.append({
                'function': _GetFrameCodeObjectName(frame),
                'location': {
                    'path': NormalizePath(code.co_filename),
                    'line': line
                },
                'arguments': frame_arguments,
                'locals': frame_locals
            })
            frame = frame.f_back
    except BaseException as e:
        # The variable table will get serialized even though there was a
        # failure. The results can be useful for diagnosing the internal
        # error.
        self.breakpoint['status'] = {
            'isError': True,
            'description': {
                'format': ('INTERNAL ERROR: Failed while capturing locals '
                           'of frame $0: $1'),
                'parameters': [str(len(breakpoint_frames)), str(e)]}}

    # Number of entries in _var_table. Starts at 1 (index 0 is the
    # 'buffer full' status value).
    num_vars = 1

    # Explore variables table in BFS fashion. The variables table will
    # grow inside CaptureVariable as we encounter new references.
    while (num_vars < len(self._var_table)) and (
        self._total_size < self.max_size):
        self._var_table[num_vars] = self.CaptureVariable(
            self._var_table[num_vars], 0, self.default_capture_limits,
            can_enqueue=False)
        # Move on to the next entry in the variable table.
        num_vars += 1

    # Trim variables table and change make all references to variables
    # that didn't make it point to var_index of 0 ("buffer full").
    self.TrimVariableTable(num_vars)

    self._CaptureEnvironmentLabels()
    self._CaptureRequestLogId()
    self._CaptureUserId()
326,286
Captures local variables and arguments of the specified frame. Args: frame: frame to capture locals and arguments. Returns: (arguments, locals) tuple.
def CaptureFrameLocals(self, frame):
    """Captures local variables and arguments of the specified frame.

    Args:
        frame: frame to capture locals and arguments.

    Returns:
        (arguments, locals) tuple.
    """
    # Capture all local variables (including method arguments).
    captured = {
        var_name: self.CaptureNamedVariable(var_name, var_value, 1,
                                            self.default_capture_limits)
        for var_name, var_value in six.viewitems(frame.f_locals)}

    # Count declared arguments, including the *args and **kwargs slots.
    code = frame.f_code
    nargs = code.co_argcount
    if code.co_flags & inspect.CO_VARARGS:
        nargs += 1
    if code.co_flags & inspect.CO_VARKEYWORDS:
        nargs += 1

    # Pull arguments out of the captured map, keeping declaration order.
    frame_arguments = []
    for arg_name in code.co_varnames[:nargs]:
        if arg_name in captured:
            frame_arguments.append(captured.pop(arg_name))

    return (frame_arguments, list(six.viewvalues(captured)))
326,287
Appends name to the product of CaptureVariable. Args: name: name of the variable. value: data to capture depth: nested depth of dictionaries and vectors so far. limits: Per-object limits for capturing variable data. Returns: Formatted captured data as per Variable proto with name.
def CaptureNamedVariable(self, name, value, depth, limits):
    """Appends name to the product of CaptureVariable.

    Args:
        name: name of the variable.
        value: data to capture.
        depth: nested depth of dictionaries and vectors so far.
        limits: per-object limits for capturing variable data.

    Returns:
        Formatted captured data as per Variable proto with name.
    """
    if hasattr(name, '__dict__'):
        # Arbitrary object used as a name: avoid calling str() on user
        # types (it could mutate state); fall back to the object id.
        # TODO(vlif): call str(name) with immutability verifier here.
        name = str(id(name))
    else:
        name = str(name)
    self._total_size += len(name)

    variable = (self.CheckDataVisiblity(value) or
                self.CaptureVariable(value, depth, limits))
    variable['name'] = name
    return variable
326,288
Returns a status object if the given name is not visible. Args: value: The value to check. The actual value here is not important but the value's metadata (e.g. package and type) will be checked. Returns: None if the value is visible. A variable structure with an error status if the value should not be visible.
def CheckDataVisiblity(self, value):
    """Returns a status object if the given value's type is not visible.

    Args:
        value: the value to check; only its type metadata (package and
            type) is examined.

    Returns:
        None if the value is visible. A variable structure with an error
        status if the value should not be visible.
    """
    policy = self.data_visibility_policy
    if not policy:
        # No policy configured: everything is visible.
        return None

    visible, reason = policy.IsDataVisible(DetermineType(value))
    if visible:
        return None

    return {
        'status': {
            'isError': True,
            'refersTo': 'VARIABLE_NAME',
            'description': {'format': reason}
        }
    }
326,289
Captures list of named items. Args: items: iterable of (name, value) tuples. depth: nested depth of dictionaries and vectors for items. empty_message: info status message to set if items is empty. limits: Per-object limits for capturing variable data. Returns: List of formatted variable objects.
def CaptureVariablesList(self, items, depth, empty_message, limits):
    """Captures a list of named items.

    Args:
        items: iterable of (name, value) tuples.
        depth: nested depth of dictionaries and vectors for items.
        empty_message: info status message to set if items is empty.
        limits: per-object limits for capturing variable data.

    Returns:
        List of formatted variable objects.
    """
    captured = []
    for item_name, item_value in items:
        over_budget = (self._total_size >= self.max_size or
                       len(captured) >= limits.max_list_items)
        if over_budget:
            # Signal truncation instead of silently dropping the tail.
            captured.append({'status': {
                'refersTo': 'VARIABLE_VALUE',
                'description': {
                    'format': ('Only first $0 items were captured. Use in '
                               'an expression to see all items.'),
                    'parameters': [str(len(captured))]}}})
            break
        captured.append(
            self.CaptureNamedVariable(item_name, item_value, depth, limits))

    if not captured:
        return [{'status': {
            'refersTo': 'VARIABLE_NAME',
            'description': {'format': empty_message}}}]
    return captured
326,290
Captures a single nameless object into Variable message. TODO(vlif): safely evaluate iterable types. TODO(vlif): safely call str(value) Args: value: data to capture depth: nested depth of dictionaries and vectors so far. limits: Per-object limits for capturing variable data. can_enqueue: allows referencing the object in variables table. Returns: Formatted captured data as per Variable proto.
def CaptureVariableInternal(self, value, depth, limits, can_enqueue=True):
    """Captures a single nameless object into Variable message.

    TODO(vlif): safely evaluate iterable types.
    TODO(vlif): safely call str(value)

    Args:
        value: data to capture.
        depth: nested depth of dictionaries and vectors so far.
        limits: per-object limits for capturing variable data.
        can_enqueue: allows referencing the object in variables table.

    Returns:
        Formatted captured data as per Variable proto.
    """
    if depth == limits.max_depth:
        return {'varTableIndex': 0}  # Buffer full.

    if value is None:
        self._total_size += 4
        return {'value': 'None'}

    if isinstance(value, _PRIMITIVE_TYPES):
        r = _TrimString(repr(value),  # Primitive type, always immutable.
                        min(limits.max_value_len,
                            self.max_size - self._total_size))
        self._total_size += len(r)
        return {'value': r, 'type': type(value).__name__}

    if isinstance(value, _DATE_TYPES):
        r = str(value)  # Safe to call str().
        self._total_size += len(r)
        return {'value': r, 'type': 'datetime.' + type(value).__name__}

    if isinstance(value, dict):
        # Do not use iteritems() here. If GC happens during iteration
        # (which it often can for dictionaries containing large
        # variables), you will get a RunTimeError exception.
        items = [(repr(k), v) for (k, v) in value.items()]
        return {'members': self.CaptureVariablesList(items,
                                                     depth + 1,
                                                     EMPTY_DICTIONARY,
                                                     limits),
                'type': 'dict'}

    if isinstance(value, _VECTOR_TYPES):
        fields = self.CaptureVariablesList(
            (('[%d]' % i, x) for i, x in enumerate(value)),
            depth + 1,
            EMPTY_COLLECTION,
            limits)
        return {'members': fields, 'type': type(value).__name__}

    if isinstance(value, types.FunctionType):
        self._total_size += len(value.__name__)
        # TODO(vlif): set value to func_name and type to 'function'
        return {'value': 'function ' + value.__name__}

    if isinstance(value, Exception):
        # Exceptions expose their constructor arguments through .args.
        fields = self.CaptureVariablesList(
            (('[%d]' % i, x) for i, x in enumerate(value.args)),
            depth + 1,
            EMPTY_COLLECTION,
            limits)
        return {'members': fields, 'type': type(value).__name__}

    if can_enqueue:
        # Reference the object via the variables table instead of
        # capturing it inline; duplicates share one table entry.
        index = self._var_table_index.get(id(value))
        if index is None:
            index = len(self._var_table)
            self._var_table_index[id(value)] = index
            self._var_table.append(value)
        self._total_size += 4  # number of characters to accommodate a number.
        return {'varTableIndex': index}

    for pretty_printer in CaptureCollector.pretty_printers:
        pretty_value = pretty_printer(value)
        if not pretty_value:
            continue
        fields, object_type = pretty_value
        return {'members': self.CaptureVariablesList(fields,
                                                     depth + 1,
                                                     OBJECT_HAS_NO_FIELDS,
                                                     limits),
                'type': object_type}

    if not hasattr(value, '__dict__'):
        # TODO(vlif): keep "value" empty and populate the "type" field
        # instead.
        r = str(type(value))
        self._total_size += len(r)
        return {'value': r}

    # Add an additional depth for the object itself.
    items = value.__dict__.items()
    if six.PY3:
        # Make a list of the iterator in Python 3, to avoid 'dict changed
        # size during iteration' errors from GC happening in the middle.
        # Only limits.max_list_items + 1 items are copied, anything past
        # that will get ignored by CaptureVariablesList().
        items = list(itertools.islice(items, limits.max_list_items + 1))
    members = self.CaptureVariablesList(items, depth + 2,
                                        OBJECT_HAS_NO_FIELDS, limits)
    v = {'members': members}
    type_string = DetermineType(value)
    if type_string:
        v['type'] = type_string
    return v
326,292
Evaluates the expression and captures it into a Variable object. Args: frame: evaluation context. expression: watched expression to compile and evaluate. Returns: Variable object (which will have error status if the expression fails to evaluate).
def _CaptureExpression(self, frame, expression):
    """Evaluates the expression and captures it into a Variable object.

    Args:
        frame: evaluation context.
        expression: watched expression to compile and evaluate.

    Returns:
        Variable object (carrying an error status if evaluation failed).
    """
    ok, value = _EvaluateExpression(frame, expression)
    if not ok:
        return {'name': expression, 'status': value}
    return self.CaptureNamedVariable(expression, value, 0,
                                     self.expression_capture_limits)
326,293
Trims the variable table in the formatted breakpoint message. Removes trailing entries in variables table. Then scans the entire breakpoint message and replaces references to the trimmed variables to point to var_index of 0 ("buffer full"). Args: new_size: desired size of variables table.
def TrimVariableTable(self, new_size):
    """Trims the variable table in the formatted breakpoint message.

    Removes trailing entries in the variables table, then walks the
    entire breakpoint message and redirects references to the trimmed
    variables to var_index 0 ("buffer full").

    Args:
        new_size: desired size of variables table.
    """
    def _RedirectTrimmed(variables):
        # Recursively rewrite references to out-of-range table entries.
        for variable in variables:
            index = variable.get('varTableIndex')
            if index is not None and index >= new_size:
                variable['varTableIndex'] = 0  # Buffer full.
            children = variable.get('members')
            if children is not None:
                _RedirectTrimmed(children)

    del self._var_table[new_size:]

    _RedirectTrimmed(self.breakpoint['evaluatedExpressions'])
    for stack_frame in self.breakpoint['stackFrames']:
        _RedirectTrimmed(stack_frame['arguments'])
        _RedirectTrimmed(stack_frame['locals'])
    _RedirectTrimmed(self._var_table)
326,294
Class constructor. Args: definition: breakpoint definition indicating log level, message, etc.
def __init__(self, definition): self._definition = definition # Maximum number of character to allow for a single value. Longer strings # are truncated. self.max_value_len = 256 # Maximum recursion depth. self.max_depth = 2 # Maximum number of items in a list to capture at the top level. self.max_list_items = 10 # When capturing recursively, limit on the size of sublists. self.max_sublist_items = 5 # Time to pause after dynamic log quota has run out. self.quota_recovery_ms = 500 # The time when we first entered the quota period self._quota_recovery_start_time = None # Select log function. level = self._definition.get('logLevel') if not level or level == 'INFO': self._log_message = log_info_message elif level == 'WARNING': self._log_message = log_warning_message elif level == 'ERROR': self._log_message = log_error_message else: self._log_message = None
326,298
Captures the minimal application states, formats it and logs the message. Args: frame: Python stack frame of breakpoint hit. Returns: None on success or status message on error.
def Log(self, frame):
    """Captures the minimal application state, formats and logs the message.

    Args:
        frame: Python stack frame of breakpoint hit.

    Returns:
        None on success or status message on error.
    """
    # Return error if log methods were not configured globally.
    if not self._log_message:
        return {'isError': True,
                'description': {'format': LOG_ACTION_NOT_SUPPORTED}}

    if self._quota_recovery_start_time:
        ms_elapsed = (time.time() - self._quota_recovery_start_time) * 1000
        if ms_elapsed > self.quota_recovery_ms:
            # We are out of the recovery period, clear the time and
            # continue.
            self._quota_recovery_start_time = None
        else:
            # We are in the recovery period, exit.
            return

    # Evaluate watched expressions.
    message = 'LOGPOINT: ' + _FormatMessage(
        self._definition.get('logMessageFormat', ''),
        self._EvaluateExpressions(frame))

    line = self._definition['location']['line']
    # NOTE(review): this local appears unused apart from the 'del' below;
    # presumably the native logging layer inspects it from the frame —
    # confirm before removing.
    cdbg_logging_location = (NormalizePath(frame.f_code.co_filename), line,
                             _GetFrameCodeObjectName(frame))
    if native.ApplyDynamicLogsQuota(len(message)):
        self._log_message(message)
    else:
        # Out of quota: log a notice once and enter the recovery period.
        self._quota_recovery_start_time = time.time()
        self._log_message(DYNAMIC_LOG_OUT_OF_QUOTA)
    del cdbg_logging_location
    return None
326,299
Evaluates watched expressions into a string form. If expression evaluation fails, the error message is used as evaluated expression string. Args: frame: Python stack frame of breakpoint hit. Returns: Array of strings where each string corresponds to the breakpoint expression with the same index.
def _EvaluateExpressions(self, frame):
    """Evaluates watched expressions into string form.

    If expression evaluation fails, the error message is used as the
    evaluated expression string.

    Args:
        frame: Python stack frame of breakpoint hit.

    Returns:
        Array of strings, one per breakpoint expression (same order).
    """
    expressions = self._definition.get('expressions') or []
    return [self._FormatExpression(frame, e) for e in expressions]
326,300
Evaluates a single watched expression and formats it into a string form. If expression evaluation fails, returns error message string. Args: frame: Python stack frame in which the expression is evaluated. expression: string expression to evaluate. Returns: Formatted expression value that can be used in the log message.
def _FormatExpression(self, frame, expression):
    """Evaluates a single watched expression and formats it into a string.

    If evaluation fails, returns the error message string wrapped in
    angle brackets.

    Args:
        frame: Python stack frame in which the expression is evaluated.
        expression: string expression to evaluate.

    Returns:
        Formatted expression value usable in the log message.
    """
    ok, value = _EvaluateExpression(frame, expression)
    if ok:
        return self._FormatValue(value)
    description = value['description']
    message = _FormatMessage(description['format'],
                             description.get('parameters'))
    return '<' + message + '>'
326,301
Pretty-prints an object for a logger. This function is very similar to the standard pprint. The main difference is that it enforces limits to make sure we never produce an extremely long string or take too much time. Args: value: Python object to print. level: current recursion level. Returns: Formatted string.
def _FormatValue(self, value, level=0):
  """Pretty-prints an object for a logger.

  This function is very similar to the standard pprint. The main difference
  is that it enforces limits to make sure we never produce an extremely long
  string or take too much time.

  Args:
    value: Python object to print.
    level: current recursion level.

  Returns:
    Formatted string.
  """

  def FormatDictItem(key_value):
    # Formats one 'key: value' pair, recursing one level deeper on both.
    key, value = key_value
    return (self._FormatValue(key, level + 1) +
            ': ' +
            self._FormatValue(value, level + 1))

  def LimitedEnumerate(items, formatter, level=0):
    # Yields formatted items, emitting '...' and stopping once the
    # per-level quota is hit (nested collections get the smaller quota).
    count = 0
    limit = self.max_sublist_items if level > 0 else self.max_list_items
    for item in items:
      if count == limit:
        yield '...'
        break

      yield formatter(item)
      count += 1

  def FormatList(items, formatter, level=0):
    return ', '.join(LimitedEnumerate(items, formatter, level=level))

  if isinstance(value, _PRIMITIVE_TYPES):
    return _TrimString(repr(value),  # Primitive type, always immutable.
                       self.max_value_len)

  if isinstance(value, _DATE_TYPES):
    return str(value)

  if level > self.max_depth:
    # Recursion cap: show only the type name instead of descending further.
    return str(type(value))

  if isinstance(value, dict):
    return '{' + FormatList(six.iteritems(value), FormatDictItem) + '}'

  if isinstance(value, _VECTOR_TYPES):
    return _ListTypeFormatString(value).format(FormatList(
        value, lambda item: self._FormatValue(item, level + 1), level=level))

  if isinstance(value, types.FunctionType):
    return 'function ' + value.__name__

  if hasattr(value, '__dict__') and value.__dict__:
    # Arbitrary object: render its attribute dictionary at the same level.
    return self._FormatValue(value.__dict__, level)

  return str(type(value))
326,302
Attempts to find the yaml configuration file, then read it. Args: relative_path: Optional relative path override. Returns: A Config object if the open and read were successful, None if the file does not exist (which is not considered an error). Raises: Error (some subclass): As thrown by the called Read() function.
def OpenAndRead(relative_path='debugger-blacklist.yaml'):
  """Attempts to find the yaml configuration file, then read it.

  Args:
    relative_path: Optional relative path override.

  Returns:
    A Config object if the open and read were successful, None if the file
    does not exist (which is not considered an error).

  Raises:
    Error (some subclass): As thrown by the called Read() function.
  """
  # Note: This logic follows the convention established by source-context.json
  config_path = os.path.join(sys.path[0], relative_path)
  try:
    with open(config_path, 'r') as config_file:
      return Read(config_file)
  except IOError:
    # A missing configuration file is not an error.
    return None
326,303
Reads and returns Config data from a yaml file. Args: f: Yaml file to parse. Returns: Config object as defined in this file. Raises: Error (some subclass): If there is a problem loading or parsing the file.
def Read(f):
  """Reads and returns Config data from a yaml file.

  Args:
    f: Yaml file to parse.

  Returns:
    Config object as defined in this file.

  Raises:
    Error (some subclass): If there is a problem loading or parsing the file.
  """
  try:
    # Security fix: yaml.load without an explicit Loader can instantiate
    # arbitrary Python objects from the file; safe_load restricts parsing
    # to plain YAML types, which is all this config needs.
    yaml_data = yaml.safe_load(f)
  except yaml.YAMLError as e:
    raise ParseError('%s' % e)
  except IOError as e:
    raise YAMLLoadError('%s' % e)

  _CheckData(yaml_data)

  try:
    return Config(
        yaml_data.get('blacklist', ()),
        # Bug fix: ('*') is just the string '*'; the default whitelist must
        # be a one-element tuple of patterns.
        yaml_data.get('whitelist', ('*',)))
  except UnicodeDecodeError as e:
    raise YAMLLoadError('%s' % e)
326,304
Generates an error message to be used when multiple matches are found. Args: path: The breakpoint location path that the user provided. candidates: List of paths that match the user provided path. Must contain at least 2 entries (throws AssertionError otherwise). Returns: A (format, parameters) tuple that should be used in the description field of the breakpoint error status.
def _MultipleModulesFoundError(path, candidates):
  """Generates an error message to be used when multiple matches are found.

  Args:
    path: The breakpoint location path that the user provided.
    candidates: List of paths that match the user provided path. Must
      contain at least 2 entries (throws AssertionError otherwise).

  Returns:
    A (format, parameters) tuple that should be used in the description
    field of the breakpoint error status.
  """
  assert len(candidates) > 1
  extra_count = len(candidates) - 2
  params = [path] + _StripCommonPathPrefix(candidates[:2])
  if extra_count:
    # More than two candidates: the message also reports how many more.
    fmt = ERROR_LOCATION_MULTIPLE_MODULES_4
    params.append(str(extra_count))
  else:
    fmt = ERROR_LOCATION_MULTIPLE_MODULES_3
  return fmt, params
326,309
Callback invoked by cdbg_native when breakpoint hits. Args: event: breakpoint event (see kIntegerConstants in native_module.cc). frame: Python stack frame of breakpoint hit or None for other events.
def _BreakpointEvent(self, event, frame):
  """Callback invoked by cdbg_native when breakpoint hits.

  Args:
    event: breakpoint event (see kIntegerConstants in native_module.cc).
    frame: Python stack frame of breakpoint hit or None for other events.
  """
  error_status = None

  if event != native.BREAKPOINT_EVENT_HIT:
    error_status = _BREAKPOINT_EVENT_STATUS[event]
  elif self.definition.get('action') == 'LOG':
    error_status = self._collector.Log(frame)
    if not error_status:
      return  # Log action successful, no need to clear the breakpoint.

  # Let only one thread capture the data and complete the breakpoint.
  if not self._SetCompleted():
    return

  self.Clear()

  if error_status:
    self._CompleteBreakpoint({'status': error_status})
    return

  collector = capture_collector.CaptureCollector(
      self.definition, self.data_visibility_policy)
  # TODO(b/69119299): This is a temporary try/except. All exceptions should be
  # caught inside Collect and converted into breakpoint error messages.
  try:
    collector.Collect(frame)
  except BaseException as e:  # pylint: disable=broad-except
    native.LogInfo('Internal error during data capture: %s' % repr(e))
    error_status = {'isError': True,
                    'description': {
                        'format': ('Internal error while capturing data: %s' %
                                   repr(e))}}
    self._CompleteBreakpoint({'status': error_status})
    return
  except:  # pylint: disable=bare-except
    # NOTE(review): unreachable on Python 3 (BaseException catches
    # everything); presumably kept as a safety net for Python 2 old-style
    # exceptions -- confirm before removing.
    native.LogInfo('Unknown exception raised')
    error_status = {'isError': True,
                    'description': {'format': 'Unknown internal error'}}
    self._CompleteBreakpoint({'status': error_status})
    return

  self._CompleteBreakpoint(collector.breakpoint, is_incremental=False)
326,318
Returns true if path matches any patten found in pattern_list. Args: path: A dot separated path to a package, class, method or variable pattern_list: A list of wildcard patterns Returns: True if path matches any wildcard found in pattern_list.
def _Matches(path, pattern_list):
  """Returns true if path matches any pattern found in pattern_list.

  Args:
    path: A dot separated path to a package, class, method or variable
    pattern_list: A list of wildcard patterns

  Returns:
    True if path matches any wildcard found in pattern_list.
  """
  # Note: This code does not scale to large pattern_list sizes.
  for pattern in pattern_list:
    if fnmatch.fnmatchcase(path, pattern):
      return True
  return False
326,323
Returns a tuple (visible, reason) stating if the data should be visible. Args: path: A dot separated path that represents a package, class, method or variable. The format is identical to pythons "import" statement. Returns: (visible, reason) where visible is a boolean that is True if the data should be visible. Reason is a string reason that can be displayed to the user and indicates why data is visible or not visible.
def IsDataVisible(self, path):
  """Returns a tuple (visible, reason) stating if the data should be visible.

  Args:
    path: A dot separated path that represents a package, class, method or
      variable. The format is identical to pythons "import" statement.

  Returns:
    (visible, reason) where visible is a boolean that is True if the data
    should be visible. Reason is a string reason that can be displayed to
    the user and indicates why data is visible or not visible.
  """
  if path is None:
    return (False, RESPONSES['UNKNOWN_TYPE'])

  # Blacklist wins over whitelist; both must be consulted.
  if _Matches(path, self.blacklist_patterns):
    return (False, RESPONSES['BLACKLISTED'])

  if _Matches(path, self.whitelist_patterns):
    return (True, RESPONSES['VISIBLE'])

  return (False, RESPONSES['NOT_WHITELISTED'])
326,324
Adds new breakpoints and removes missing ones. Args: breakpoints_data: updated list of active breakpoints.
def SetActiveBreakpoints(self, breakpoints_data):
  """Adds new breakpoints and removes missing ones.

  Args:
    breakpoints_data: updated list of active breakpoints.
  """
  with self._lock:
    ids = set([x['id'] for x in breakpoints_data])

    # Clear breakpoints that no longer show up in active breakpoints list.
    for breakpoint_id in six.viewkeys(self._active) - ids:
      self._active.pop(breakpoint_id).Clear()

    # Create new breakpoints (only for ids not already active and not
    # already completed).
    self._active.update([
        (x['id'],
         python_breakpoint.PythonBreakpoint(
             x, self._hub_client, self, self.data_visibility_policy))
        for x in breakpoints_data
        if x['id'] in ids - six.viewkeys(self._active) - self._completed])

    # Remove entries from completed_breakpoints_ that weren't listed in
    # breakpoints_data vector. These are confirmed to have been removed by the
    # hub and the debuglet can now assume that they will never show up ever
    # again. The backend never reuses breakpoint IDs.
    self._completed &= ids

    if self._active:
      self._next_expiration = datetime.min  # Not known.
    else:
      self._next_expiration = datetime.max
326,326
Marks the specified breakpoint as completed. Appends the ID to set of completed breakpoints and clears it. Args: breakpoint_id: breakpoint ID to complete.
def CompleteBreakpoint(self, breakpoint_id):
  """Marks the specified breakpoint as completed.

  Appends the ID to the set of completed breakpoints and clears it.

  Args:
    breakpoint_id: breakpoint ID to complete.
  """
  with self._lock:
    self._completed.add(breakpoint_id)
    removed = self._active.pop(breakpoint_id, None)
    if removed is not None:
      removed.Clear()
326,327
Checks whether path is a full path suffix of mod_path. Args: mod_path: Must be an absolute path to a source file. Must not have file extension. path: A relative path. Must not have file extension. Returns: True if path is a full path suffix of mod_path. False otherwise.
def IsPathSuffix(mod_path, path):
  """Checks whether path is a full path suffix of mod_path.

  Args:
    mod_path: Must be an absolute path to a source file. Must not have
      file extension.
    path: A relative path. Must not have file extension.

  Returns:
    True if path is a full path suffix of mod_path. False otherwise.
  """
  if not mod_path.endswith(path):
    return False
  if len(mod_path) == len(path):
    # Exact match.
    return True
  # The suffix must start at a path-component boundary.
  return mod_path[:-len(path)].endswith(os.sep)
326,332
Searches for a code object at the specified line in the specified module. Args: module: module to explore. line: 1-based line number of the statement. Returns: (True, Code object) on success or (False, (prev_line, next_line)) on failure, where prev_line and next_line are the closest lines with code above and below the specified line, or None if they do not exist.
def GetCodeObjectAtLine(module, line):
  """Searches for a code object at the specified line in the specified module.

  Args:
    module: module to explore.
    line: 1-based line number of the statement.

  Returns:
    (True, Code object) on success or (False, (prev_line, next_line)) on
    failure, where prev_line and next_line are the closest lines with code
    above and below the specified line, or None if they do not exist.
  """
  if not hasattr(module, '__file__'):
    return (False, (None, None))

  prev_line = 0
  next_line = six.MAXSIZE
  for code_object in _GetModuleCodeObjects(module):
    for co_line_number in _GetLineNumbers(code_object):
      if co_line_number == line:
        return (True, code_object)
      elif co_line_number < line:
        prev_line = max(prev_line, co_line_number)
      elif co_line_number > line:
        next_line = min(next_line, co_line_number)
        # NOTE(review): breaking here assumes line numbers within a code
        # object are non-decreasing; lnotab deltas are typically positive,
        # but confirm for the supported Python versions.
        break

  # Translate the sentinels back to None for "no such line".
  prev_line = None if prev_line == 0 else prev_line
  next_line = None if next_line == six.MAXSIZE else next_line
  return (False, (prev_line, next_line))
326,334
Generator for getting the line numbers of a code object. Args: code_object: the code object. Yields: The next line number in the code object.
def _GetLineNumbers(code_object):
  """Generator for getting the line numbers of a code object.

  Args:
    code_object: the code object.

  Yields:
    The next line number in the code object.
  """
  # Get the line number deltas, which are the odd number entries, from the
  # lnotab. See
  # https://svn.python.org/projects/python/branches/pep-0384/Objects/lnotab_notes.txt
  # In Python 3, this is just a byte array. In Python 2 it is a string so the
  # numerical values have to be extracted from the individual characters.
  # NOTE(review): co_lnotab is deprecated as of Python 3.10 in favor of
  # co_lines(); revisit if newer interpreters must be supported.
  if six.PY3:
    line_incrs = code_object.co_lnotab[1::2]
  else:
    line_incrs = (ord(c) for c in code_object.co_lnotab[1::2])

  current_line = code_object.co_firstlineno
  for line_incr in line_incrs:
    current_line += line_incr
    yield current_line
326,335
Records the object as visited. Args: obj: visited object. Returns: True if the object hasn't been previously visited or False if it has already been recorded or the quota has been exhausted.
def Record(self, obj):
  """Records the object as visited.

  Args:
    obj: visited object.

  Returns:
    True if the object hasn't been previously visited or False if it has
    already been recorded or the quota has been exhausted.
  """
  recorder = self._visit_recorder_objects
  # Reject when the quota is exhausted or the object was already seen.
  if len(recorder) >= _MAX_VISIT_OBJECTS or id(obj) in recorder:
    return False
  # Keep a reference so the id() stays unique for the recorder's lifetime.
  recorder[id(obj)] = obj
  return True
326,338
Class constructor. Args: min_interval_sec: initial small delay. max_interval_sec: maximum delay between retries. multiplier: factor for exponential increase.
def __init__(self, min_interval_sec=10, max_interval_sec=600, multiplier=2):
  """Class constructor.

  Args:
    min_interval_sec: initial small delay.
    max_interval_sec: maximum delay between retries.
    multiplier: factor for exponential increase.
  """
  self.min_interval_sec = min_interval_sec
  self.max_interval_sec = max_interval_sec
  self.multiplier = multiplier
  # Start in the "no recent failures" state with the smallest delay.
  self.Succeeded()
326,339
Processes an import. Calculates the possible names generated from an import and invokes registered callbacks if needed. Args: name: Argument as passed to the importer. fromlist: Argument as passed to the importer. globals: Argument as passed to the importer.
def _ProcessImportBySuffix(name, fromlist, globals): _import_local.nest_level -= 1 # To improve common code path performance, compute the loaded modules only # if there are any import callbacks. if _import_callbacks: # Collect the names of all modules that might be newly loaded as a result # of this import. Add them in a thread-local list. _import_local.names |= _GenerateNames(name, fromlist, globals) # Invoke the callbacks only on the top-level import call. if _import_local.nest_level == 0: _InvokeImportCallbackBySuffix(_import_local.names) # To be safe, we clear the names set every time we exit a top level import. if _import_local.nest_level == 0: _import_local.names.clear()
326,345
Write filter to FilterScript object or filename Args: script (FilterScript object or filename str): the FilterScript object or script filename to write the filter to. filter_xml (str): the xml filter string
def write_filter(script, filter_xml):
    """Write filter to FilterScript object or filename

    Args:
        script (FilterScript object or filename str): the FilterScript
            object or script filename to write the filter to.
        filter_xml (str): the xml filter string
    """
    if isinstance(script, mlx.FilterScript):
        script.filters.append(filter_xml)
    elif isinstance(script, str):
        # Bug fix: use a context manager so the file handle is closed even
        # if the write raises (the old open/write/close leaked on error).
        with open(script, 'a') as script_file:
            script_file.write(filter_xml)
    else:
        # No known sink: echo the filter for debugging.
        print(filter_xml)
    return None
326,368
Split non-manifold vertices until it becomes two-manifold. Args: script: the FilterScript object or script filename to write the filter to. vert_displacement_ratio (float): When a vertex is split it is moved along the average vector going from its position to the centroid of the FF connected faces sharing it. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def split_vert_on_nonmanifold_face(script, vert_displacement_ratio=0.0):
    """Split non-manifold vertices until the mesh becomes two-manifold.

    Args:
        script: the FilterScript object or script filename to write the
            filter to.
        vert_displacement_ratio (float): When a vertex is split it is moved
            along the average vector going from its position to the centroid
            of the FF connected faces sharing it.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    filter_xml = ''.join([
        '  <filter name="Split Vertexes Incident on Non Manifold Faces">\n',
        '    <Param name="VertDispRatio" ',
        'value="{}" '.format(vert_displacement_ratio),
        'description="Vertex Displacement Ratio" ',
        'type="RichFloat" ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
326,372
Freeze the current transformation matrix into the coordinates of the vertices of the mesh (and set this matrix to the identity). In other words it applies in a definitive way the current matrix to the vertex coordinates. Args: script: the FilterScript object or script filename to write the filter to. all_layers (bool): If selected the filter will be applied to all visible mesh layers.
def freeze_matrix(script, all_layers=False):
    """Freeze the current transformation matrix into the coordinates of the
    vertices of the mesh (and set this matrix to the identity).

    In other words it applies in a definitive way the current matrix to the
    vertex coordinates.

    Args:
        script: the FilterScript object or script filename to write the
            filter to.
        all_layers (bool): If selected the filter will be applied to all
            visible mesh layers.
    """
    apply_to_all = str(all_layers).lower()
    filter_xml = ''.join([
        '  <filter name="Freeze Current Matrix">\n',
        '    <Param name="allLayers" ',
        'value="{}" '.format(apply_to_all),
        'description="Apply to all visible Layers" ',
        'type="RichBool" ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
326,380
Transfer mesh colors to face colors Args: script: the FilterScript object or script filename to write the filter to. all_visible_layers (bool): If true the color mapping is applied to all the meshes
def mesh2fc(script, all_visible_layers=False):
    """Transfer mesh colors to face colors

    Args:
        script: the FilterScript object or script filename to write the
            filter to.
        all_visible_layers (bool): If true the color mapping is applied to
            all the meshes
    """
    apply_to_all = str(all_visible_layers).lower()
    filter_xml = ''.join([
        '  <filter name="Transfer Color: Mesh to Face">\n',
        '    <Param name="allVisibleMesh" ',
        'value="{}" '.format(apply_to_all),
        'description="Apply to all Meshes" ',
        'type="RichBool" ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
326,393
Turn a model into a surface with Voronoi style holes in it References: http://meshlabstuff.blogspot.com/2009/03/creating-voronoi-sphere.html http://meshlabstuff.blogspot.com/2009/04/creating-voronoi-sphere-2.html Requires FilterScript object Args: script: the FilterScript object to write the filter to. Does not work with a script filename. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def voronoi(script, hole_num=50, target_layer=None, sample_layer=None,
            thickness=0.5, backward=True):
    """Turn a model into a surface with Voronoi style holes in it

    References:
    http://meshlabstuff.blogspot.com/2009/03/creating-voronoi-sphere.html
    http://meshlabstuff.blogspot.com/2009/04/creating-voronoi-sphere-2.html

    Requires FilterScript object

    Args:
        script: the FilterScript object to write the filter to. Does not
            work with a script filename.
        hole_num (int): number of holes (Voronoi seed points) to create.
        target_layer (int): layer to cut holes into; default current layer.
        sample_layer (int): layer holding the seed points; generated via
            Poisson disk sampling if not provided.
        thickness (float): quality threshold controlling the width of the
            remaining surface between holes.
        backward (bool): passed through to vert_color.voronoi; also flips
            which selection is deleted.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    if target_layer is None:
        target_layer = script.current_layer()
    if sample_layer is None:
        # Current layer is currently not changed after poisson_disk is run
        sampling.poisson_disk(script, sample_num=hole_num)
        sample_layer = script.last_layer()
    # Encode distance-to-seed in vertex quality/color, then select and
    # delete the region around (or away from) each seed point.
    vert_color.voronoi(script, target_layer=target_layer,
                       source_layer=sample_layer, backward=backward)
    select.vert_quality(script, min_quality=0.0, max_quality=thickness)
    if backward:
        select.invert(script)
    delete.selected(script)
    # Smooth the freshly cut hole borders.
    smooth.laplacian(script, iterations=3)
    return None
326,403
Select all the faces and/or vertices of the current mesh Args: script: the FilterScript object or script filename to write the filter to. face (bool): If True the filter will select all the faces. vert (bool): If True the filter will select all the vertices. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def all(script, face=True, vert=True):
    """Select all the faces and/or vertices of the current mesh

    Args:
        script: the FilterScript object or script filename to write the
            filter to.
        face (bool): If True the filter will select all the faces.
        vert (bool): If True the filter will select all the vertices.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    filter_xml = ''.join([
        '  <filter name="Select All">\n',
        '    <Param name="allFaces" ',
        'value="{}" '.format(str(face).lower()),
        # Typo fix: description previously read "DSelect all Faces".
        'description="Select all Faces" ',
        'type="RichBool" ',
        '/>\n',
        '    <Param name="allVerts" ',
        'value="{}" '.format(str(vert).lower()),
        'description="Select all Vertices" ',
        'type="RichBool" ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
326,404
Grow (dilate, expand) the current set of selected faces Args: script: the FilterScript object or script filename to write the filter to. iterations (int): the number of times to grow the selection. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def grow(script, iterations=1):
    """Grow (dilate, expand) the current set of selected faces

    Args:
        script: the FilterScript object or script filename to write the
            filter to.
        iterations (int): the number of times to grow the selection.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    filter_xml = ' <filter name="Dilate Selection"/>\n'
    # Each application of the filter grows the selection by one ring.
    count = 0
    while count < iterations:
        util.write_filter(script, filter_xml)
        count += 1
    return None
326,405
Shrink (erode, reduce) the current set of selected faces Args: script: the FilterScript object or script filename to write the filter to. iterations (int): the number of times to shrink the selection. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def shrink(script, iterations=1):
    """Shrink (erode, reduce) the current set of selected faces

    Args:
        script: the FilterScript object or script filename to write the
            filter to.
        iterations (int): the number of times to shrink the selection.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    filter_xml = ' <filter name="Erode Selection"/>\n'
    # Each application of the filter erodes the selection by one ring.
    count = 0
    while count < iterations:
        util.write_filter(script, filter_xml)
        count += 1
    return None
326,406
Select all vertices within a cylindrical radius (cylinder axis parallel to the z axis) Args: script: the FilterScript object or script filename to write the filter to. radius (float): radius of the cylinder inside (bool): if True select vertices inside the cylinder, otherwise select vertices outside it Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def cylindrical_vert(script, radius=1.0, inside=True, center_pt=(0.0, 0.0)):
    """Select all vertices within a cylindrical radius

    The cylinder axis is parallel to the z axis. The docstring always
    promised a center point; this adds it backward-compatibly (the default
    keeps the historical axis through the origin and emits the exact same
    muparser expression as before).

    Args:
        script: the FilterScript object or script filename to write the
            filter to.
        radius (float): radius of the cylinder
        inside (bool): if True select vertices inside the cylinder,
            otherwise select vertices outside it.
        center_pt (2 coordinate tuple or list): x, y location of the
            cylinder axis.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    if center_pt[0] == 0.0 and center_pt[1] == 0.0:
        # Preserve the original (simpler) expression for the default axis.
        radial_dist = 'sqrt(x^2+y^2)'
    else:
        radial_dist = 'sqrt((x-{})^2+(y-{})^2)'.format(center_pt[0],
                                                       center_pt[1])
    operator = '<=' if inside else '>='
    function = '{}{}{}'.format(radial_dist, operator, radius)
    vert_function(script, function=function)
    return None
326,411
Select all vertices within a spherical radius Args: radius (float): radius of the sphere center_pt (3 coordinate tuple or list): center point of the sphere Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def spherical_vert(script, radius=1.0, center_pt=(0.0, 0.0, 0.0)):
    """Select all vertices within a spherical radius

    Args:
        script: the FilterScript object or script filename to write the
            filter to.
        radius (float): radius of the sphere
        center_pt (3 coordinate tuple or list): center point of the sphere

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    condition = 'sqrt((x-{})^2+(y-{})^2+(z-{})^2)<={}'.format(
        center_pt[0], center_pt[1], center_pt[2], radius)
    vert_function(script, function=condition)
    return None
326,412
Delete layer Args: script: the mlx.FilterScript object or script filename to write the filter to. layer_num (int): the number of the layer to delete. Default is the current layer. Not supported on the file base API. Layer stack: Deletes a layer will change current layer if deleted layer is lower in the stack MeshLab versions: 2016.12 1.3.4BETA
def delete(script, layer_num=None):
    """Delete layer

    Args:
        script: the mlx.FilterScript object or script filename to write
            the filter to.
        layer_num (int): the number of the layer to delete. Default is the
            current layer. Not supported on the file base API.

    Layer stack:
        Deletes a layer
        will change current layer if deleted layer is lower in the stack

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    filter_xml = ' <filter name="Delete Current Mesh"/>\n'
    if isinstance(script, mlx.FilterScript):
        if (layer_num is None) or (layer_num == script.current_layer()):
            # Deleting the current layer: no switching required.
            util.write_filter(script, filter_xml)
            script.del_layer(script.current_layer())
        else:
            cur_layer = script.current_layer()
            change(script, layer_num)
            util.write_filter(script, filter_xml)
            # Deleting a layer below the saved current layer shifts layer
            # numbers down by one, so compensate when switching back.
            # NOTE(review): change() above set current_layer() to
            # layer_num, so this comparison looks like it always takes the
            # else branch -- verify whether cur_layer was intended here.
            if layer_num < script.current_layer():
                change(script, cur_layer - 1)
            else:
                change(script, cur_layer)
            script.del_layer(layer_num)
    else:
        util.write_filter(script, filter_xml)
    return None
326,414
Rename layer label Can be useful for outputting mlp files, as the output file names use the labels. Args: script: the mlx.FilterScript object or script filename to write the filter to. label (str): new label for the mesh layer layer_num (int): layer number to rename. Default is the current layer. Not supported on the file base API. Layer stack: Renames a layer MeshLab versions: 2016.12 1.3.4BETA
def rename(script, label='blank', layer_num=None):
    """Rename layer label

    Can be useful for outputting mlp files, as the output file names use
    the labels.

    Args:
        script: the mlx.FilterScript object or script filename to write
            the filter to.
        label (str): new label for the mesh layer
        layer_num (int): layer number to rename. Default is the current
            layer. Not supported on the file base API.

    Layer stack:
        Renames a layer

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # NOTE(review): label is interpolated into the XML unescaped; labels
    # containing '"', '<' or '&' would produce invalid filter XML.
    filter_xml = ''.join([
        ' <filter name="Rename Current Mesh">\n',
        ' <Param name="newName" ',
        'value="{}" '.format(label),
        'description="New Label" ',
        'type="RichString" ',
        '/>\n',
        ' </filter>\n'])
    if isinstance(script, mlx.FilterScript):
        if (layer_num is None) or (layer_num == script.current_layer()):
            # Renaming the current layer: apply in place.
            util.write_filter(script, filter_xml)
            script.layer_stack[script.current_layer()] = label
        else:
            # Switch to the target layer, rename, and switch back.
            cur_layer = script.current_layer()
            change(script, layer_num)
            util.write_filter(script, filter_xml)
            change(script, cur_layer)
            script.layer_stack[layer_num] = label
    else:
        util.write_filter(script, filter_xml)
    return None
326,415
Change the current layer by specifying the new layer number. Args: script: the mlx.FilterScript object or script filename to write the filter to. layer_num (int): the number of the layer to change to. Default is the last layer if script is a mlx.FilterScript object; if script is a filename the default is the first layer. Layer stack: Modifies current layer MeshLab versions: 2016.12 1.3.4BETA
def change(script, layer_num=None):
    """Change the current layer by specifying the new layer number.

    Args:
        script: the mlx.FilterScript object or script filename to write
            the filter to.
        layer_num (int): the number of the layer to change to. Default is
            the last layer if script is a mlx.FilterScript object; if
            script is a filename the default is the first layer.

    Layer stack:
        Modifies current layer

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    script_is_object = isinstance(script, mlx.FilterScript)
    if layer_num is None:
        layer_num = script.last_layer() if script_is_object else 0
    filter_xml = ''.join([
        '  <filter name="Change the current layer">\n',
        '    <Param name="mesh" ',
        'value="{:d}" '.format(layer_num),
        'description="Mesh" ',
        'type="RichMesh" ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    if script_is_object:
        # Keep the FilterScript's notion of the current layer in sync.
        script.set_current_layer(layer_num)
    return None
326,416
Duplicate a layer. New layer label is '*_copy'. Args: script: the mlx.FilterScript object or script filename to write the filter to. layer_num (int): layer number to duplicate. Default is the current layer. Not supported on the file base API. Layer stack: Creates a new layer Changes current layer to the new layer MeshLab versions: 2016.12 1.3.4BETA
def duplicate(script, layer_num=None):
    """Duplicate a layer.

    New layer label is '*_copy'.

    Args:
        script: the mlx.FilterScript object or script filename to write
            the filter to.
        layer_num (int): layer number to duplicate. Default is the
            current layer. Not supported on the file base API.

    Layer stack:
        Creates a new layer
        Changes current layer to the new layer

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    filter_xml = ' <filter name="Duplicate Current layer"/>\n'
    if isinstance(script, mlx.FilterScript):
        if (layer_num is None) or (layer_num == script.current_layer()):
            util.write_filter(script, filter_xml)
            # add_layer with change_layer=True makes the copy current.
            script.add_layer('{}_copy'.format(script.layer_stack[script.current_layer()]), True)
        else:
            # NOTE(review): unlike rename()/delete(), the original current
            # layer is not restored here; the new copy becomes current
            # (consistent with the documented layer-stack behavior).
            change(script, layer_num)
            util.write_filter(script, filter_xml)
            script.add_layer('{}_copy'.format(script.layer_stack[layer_num]), True)
    else:
        util.write_filter(script, filter_xml)
    return None
326,417
Subprocess program error handling Args: program_name (str): name of the subprocess program Returns: break_now (bool): indicate whether calling program should break out of loop
def handle_error(program_name, cmd, log=None):
    """Subprocess program error handling

    Args:
        program_name (str): name of the subprocess program
        cmd (str): the command line that was run
        log (str): log filename, if any

    Returns:
        break_now (bool): indicate whether calling program should break
            out of loop
    """
    print('\nHouston, we have a problem.',
          '\n%s did not finish successfully. Review the log' % program_name,
          'file and the input file(s) to see what went wrong.')
    print('%s command: "%s"' % (program_name, cmd))
    if log is not None:
        print('log: "%s"' % log)
    print('Where do we go from here?')
    print(' r - retry running %s (probably after' % program_name,
          'you\'ve fixed any problems with the input files)')
    print(' c - continue on with the script (probably after',
          'you\'ve manually re-run and generated the desired',
          'output file(s)')
    print(' x - exit, keeping the TEMP3D files and log')
    print(' xd - exit, deleting the TEMP3D files and log')
    while True:
        choice = input('Select r, c, x (default), or xd: ')
        if not choice:
            # Empty input takes the advertised default.
            choice = 'x'
        if choice in ('r', 'c', 'x', 'xd'):
            break
        # Bug fix: previously any invalid entry silently mapped to 'x' and
        # exited; re-prompt instead (this was the commented-out intent).
        print('Please enter a valid option.')
    if choice == 'x':
        print('Exiting ...')
        sys.exit(1)
    elif choice == 'xd':
        print('Deleting TEMP3D* and log files and exiting ...')
        util.delete_all('TEMP3D*')
        if log is not None:
            os.remove(log)
        sys.exit(1)
    elif choice == 'c':
        print('Continuing on ...')
        break_now = True
    else:  # choice == 'r'
        print('Retrying %s cmd ...' % program_name)
        break_now = False
    return break_now
326,420
Add new mesh layer to the end of the stack Args: label (str): new label for the mesh layer change_layer (bool): change to the newly created layer
def add_layer(self, label, change_layer=True):
    """Add new mesh layer to the end of the stack

    Args:
        label (str): new label for the mesh layer
        change_layer (bool): change to the newly created layer
    """
    # Insert just after last_layer() rather than appending; the stack may
    # carry bookkeeping entries past the last real layer.
    self.layer_stack.insert(self.last_layer() + 1, label)
    if not change_layer:
        return None
    self.set_current_layer(self.last_layer())
    return None
326,427
Check for every vertex on the mesh: if it is NOT referenced by a face, removes it. Args: script: the FilterScript object or script filename to write the filter to. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def unreferenced_vert(script):
    """Check for every vertex on the mesh: if it is NOT referenced by a
    face, removes it.

    Args:
        script: the FilterScript object or script filename to write the
            filter to.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # The filter was renamed between MeshLab releases.
    if script.ml_version == '1.3.4BETA':
        filter_name = 'Remove Unreferenced Vertex'
    else:
        filter_name = 'Remove Unreferenced Vertices'
    util.write_filter(script, ' <filter name="{}"/>\n'.format(filter_name))
    return None
326,434
Check for every vertex on the mesh: if there are two vertices with the same coordinates they are merged into a single one. Args: script: the FilterScript object or script filename to write the filter to. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def duplicate_verts(script):
    """Check for every vertex on the mesh: if there are two vertices with
    the same coordinates they are merged into a single one.

    Args:
        script: the FilterScript object or script filename to write the
            filter to.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # The filter was renamed between MeshLab releases.
    old_version = script.ml_version == '1.3.4BETA'
    filter_name = ('Remove Duplicated Vertex' if old_version
                   else 'Remove Duplicate Vertices')
    util.write_filter(script, ' <filter name="{}"/>\n'.format(filter_name))
    return None
326,435
Compute a set of topological measures over a mesh Args: script: the mlx.FilterScript object or script filename to write the filter to. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def measure_topology(script):
    """Compute a set of topological measures over a mesh

    Args:
        script: the mlx.FilterScript object or script filename to write
            the filter to.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    util.write_filter(script,
                      ' <xmlfilter name="Compute Topological Measures"/>\n')
    if isinstance(script, mlx.FilterScript):
        # Flag the script so the runner parses topology values from the log.
        script.parse_topology = True
    return None
326,455
Parse the ml_log file generated by the measure_geometry function. Warnings: Not all keys may exist if mesh is not watertight or manifold Args: ml_log (str): MeshLab log file to parse log (str): filename to log output
def parse_geometry(ml_log, log=None, ml_version='2016.12', print_output=False):
    """Parse the ml_log file generated by the measure_geometry function.

    Warnings: Not all keys may exist if mesh is not watertight or manifold

    Args:
        ml_log (str): MeshLab log file to parse
        log (str): filename to log output
        ml_version (str): MeshLab version that produced the log; token
            positions differ between '1.3.4BETA' and '2016.12'.
        print_output (bool): if True and no log file is given, print the
            parsed values to stdout.

    Returns:
        dict: parsed geometry values ('aabb' sub-dict, volume, surface
        area, edge lengths, barycenters, inertia data).
    """
    # TODO: read more than one occurrence per file. Record in list.
    aabb = {}
    geometry = {'aabb':aabb}
    with open(ml_log) as fread:
        for line in fread:
            if 'Mesh Bounding Box min' in line: #2016.12
                geometry['aabb']['min'] = (line.split()[4:7])
                geometry['aabb']['min'] = [util.to_float(val) for val in geometry['aabb']['min']]
            if 'Mesh Bounding Box max' in line: #2016.12
                geometry['aabb']['max'] = (line.split()[4:7])
                geometry['aabb']['max'] = [util.to_float(val) for val in geometry['aabb']['max']]
            if 'Mesh Bounding Box Size' in line: #2016.12
                geometry['aabb']['size'] = (line.split()[4:7])
                geometry['aabb']['size'] = [util.to_float(val) for val in geometry['aabb']['size']]
            if 'Mesh Bounding Box Diag' in line: #2016.12
                geometry['aabb']['diagonal'] = util.to_float(line.split()[4])
            if 'Mesh Volume' in line:
                geometry['volume_mm3'] = util.to_float(line.split()[3])
                geometry['volume_cm3'] = geometry['volume_mm3'] * 0.001
            if 'Mesh Surface' in line:
                # Token position shifted between MeshLab versions.
                if ml_version == '1.3.4BETA':
                    geometry['area_mm2'] = util.to_float(line.split()[3])
                else:
                    geometry['area_mm2'] = util.to_float(line.split()[4])
                geometry['area_cm2'] = geometry['area_mm2'] * 0.01
            if 'Mesh Total Len of' in line:
                if 'including faux edges' in line:
                    geometry['total_edge_length_incl_faux'] = util.to_float(
                        line.split()[7])
                else:
                    geometry['total_edge_length'] = util.to_float(
                        line.split()[7])
            if 'Thin shell barycenter' in line:
                geometry['barycenter'] = (line.split()[3:6])
                geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']]
            if 'Thin shell (faces) barycenter' in line: #2016.12
                geometry['barycenter'] = (line.split()[4:7])
                geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']]
            if 'Vertices barycenter' in line: #2016.12
                geometry['vert_barycenter'] = (line.split()[2:5])
                geometry['vert_barycenter'] = [util.to_float(val) for val in geometry['vert_barycenter']]
            if 'Center of Mass' in line:
                geometry['center_of_mass'] = (line.split()[4:7])
                geometry['center_of_mass'] = [util.to_float(val) for val in geometry['center_of_mass']]
            if 'Inertia Tensor' in line:
                # The 3 tensor rows follow on the next lines of the log.
                geometry['inertia_tensor'] = []
                for val in range(3):
                    row = (next(fread, val).split()[1:4])
                    row = [util.to_float(b) for b in row]
                    geometry['inertia_tensor'].append(row)
            if 'Principal axes' in line:
                # The 3 axis rows follow on the next lines of the log.
                geometry['principal_axes'] = []
                for val in range(3):
                    row = (next(fread, val).split()[1:4])
                    row = [util.to_float(b) for b in row]
                    geometry['principal_axes'].append(row)
            if 'axis momenta' in line:
                geometry['axis_momenta'] = (next(fread).split()[1:4])
                geometry['axis_momenta'] = [util.to_float(val) for val in geometry['axis_momenta']]
                break  # stop after we find the first match
    for key, value in geometry.items():
        if log is not None:
            # NOTE(review): the log file is re-opened for every key; fine
            # for this small dict, but could be opened once outside the loop.
            log_file = open(log, 'a')
            log_file.write('{:27} = {}\n'.format(key, value))
            log_file.close()
        elif print_output:
            print('{:27} = {}'.format(key, value))
    return geometry
326,456
muparser atan2 function Implements an atan2(y,x) function for older muparser versions (<2.1.0); atan2 was added as a built-in function in muparser 2.1.0 Args: y (str): y argument of the atan2(y,x) function x (str): x argument of the atan2(y,x) function Returns: A muparser string that calculates atan2(y,x)
def mp_atan2(y, x):
    """muparser atan2 function

    Implements an atan2(y,x) function for older muparser versions (<2.1.0);
    atan2 was added as a built-in function in muparser 2.1.0

    Args:
        y (str): y argument of the atan2(y,x) function
        x (str): x argument of the atan2(y,x) function

    Returns:
        A muparser string that calculates atan2(y,x)
    """
    # Bug fix: the old implementation substituted with chained
    # str.replace('y', y).replace('x', x), which corrupted the result
    # whenever an argument itself contained the letters 'x', 'y' or 'pi'
    # (later replacements were applied to already-substituted text).
    # str.format with named placeholders substitutes each slot exactly once.
    template = (
        'if(({x})>0, atan(({y})/({x})), if((({x})<0) and (({y})>=0), '
        'atan(({y})/({x}))+{pi}, if((({x})<0) and (({y})<0), '
        'atan(({y})/({x}))-{pi}, if((({x})==0) and (({y})>0), {pi}/2, '
        'if((({x})==0) and (({y})<0), -{pi}/2, 0)))))')
    return template.format(x=x, y=y, pi=math.pi)
326,462
muparser cross product function Compute the cross product of two 3x1 vectors Args: u (list or tuple of 3 strings): first vector v (list or tuple of 3 strings): second vector Returns: A list containing a muparser string of the cross product
def v_cross(u, v):
    """muparser cross product function

    Compute the cross product of two 3x1 vectors

    Args:
        u (list or tuple of 3 strings): first vector
        v (list or tuple of 3 strings): second vector

    Returns:
        A list containing a muparser string of the cross product
    """
    # Each component has the same '(a)*(b) - (c)*(d)' shape; only the
    # operand indices differ.
    component = '(({a})*({b}) - ({c})*({d}))'
    return [
        component.format(a=u[1], b=v[2], c=u[2], d=v[1]),
        component.format(a=u[2], b=v[0], c=u[0], d=v[2]),
        component.format(a=u[0], b=v[1], c=u[1], d=v[0]),
    ]
326,463
Returns module filenames from package. Args: package_path: Path to Python package. Returns: A set of module filenames.
def get_pkg_module_names(package_path):
    """Returns module filenames from package.

    Args:
        package_path: Path to Python package.

    Returns:
        A set of module filenames.
    """
    module_files = set()
    for finder, module_name, _ in pkgutil.iter_modules(path=[package_path]):
        candidate = os.path.join(finder.path, '%s.py' % module_name)
        # Only keep modules backed by an actual .py source file.
        if os.path.exists(candidate):
            module_files.add(os.path.abspath(candidate))
    return module_files
327,236
Initializes profiler. Args: run_object: object to be profiled.
def __init__(self, run_object):
  """Initializes profiler.

  Args:
    run_object: object to be profiled.
  """
  # Dispatch on the detected object type; anything that is neither a
  # module nor a package is treated as a function.
  initializers = {
      'module': self.init_module,
      'package': self.init_package,
  }
  obj_type = self.get_run_object_type(run_object)
  initializers.get(obj_type, self.init_function)(run_object)
327,240
Samples current stack and adds result in self._stats. Args: signum: Signal that activates handler. frame: Frame on top of the stack when signal is handled.
def sample(self, signum, frame):  #pylint: disable=unused-argument
  """Samples current stack and adds result in self._stats.

  Args:
    signum: Signal that activates handler.
    frame: Frame on top of the stack when signal is handled.
  """
  stack = []
  # Walk from the interrupted frame up to (but excluding) the profiler's
  # own base frame.
  while frame and frame != self.base_frame:
    stack.append((
        frame.f_code.co_name,
        frame.f_code.co_filename,
        frame.f_code.co_firstlineno))
    frame = frame.f_back
  # Tuples are hashable, so the whole stack can key the counter.
  self._stats[tuple(stack)] += 1
  # Re-arm the one-shot profiling timer for the next sample.
  signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)
327,249
Inserts stack into the call tree. Args: stack: Call stack. sample_count: Sample count of call stack. call_tree: Call tree.
def _insert_stack(stack, sample_count, call_tree):
  """Inserts stack into the call tree.

  Args:
    stack: Call stack.
    sample_count: Sample count of call stack.
    call_tree: Call tree.
  """
  node = call_tree
  for func in stack:
    # Find an existing child for this frame, or create one.
    child = None
    for candidate in node['children']:
      if candidate['stack'] == func:
        child = candidate
        break
    if child is None:
      child = {'stack': func, 'children': [], 'sampleCount': 0}
      node['children'].append(child)
    node = child
  # Only the leaf of the inserted stack carries the sample count.
  node['sampleCount'] = sample_count
327,250
Starts HTTP server with specified parameters. Args: host: Server host name. port: Server port. profiler_stats: A dict with collected program stats. dont_start_browser: Whether to open browser after profiling. debug_mode: Whether to redirect stderr to /dev/null.
def start(host, port, profiler_stats, dont_start_browser, debug_mode):
  """Starts HTTP server with specified parameters.

  Args:
    host: Server host name.
    port: Server port.
    profiler_stats: A dict with collected program stats.
    dont_start_browser: Whether to open browser after profiling.
    debug_mode: Whether to redirect stderr to /dev/null.
  """
  handler = functools.partial(StatsHandler, profiler_stats)

  if not debug_mode:
    # Silence server noise unless debugging.
    sys.stderr = open(os.devnull, 'w')

  print('Starting HTTP server...')
  if not dont_start_browser:
    webbrowser.open('http://{}:{}/'.format(host, port))

  server = StatsServer((host, port), handler)
  try:
    server.serve_forever()
  except KeyboardInterrupt:
    print('Stopping...')
    sys.exit(0)
327,265
Runs profilers on run_object. Args: run_object: An object (string or tuple) for profiling. prof_config: A string with profilers configuration. verbose: True if info about running profilers should be shown. Returns: An ordered dictionary with collected stats. Raises: AmbiguousConfigurationError: when prof_config is ambiguous. BadOptionError: when unknown options are present in configuration.
def run_profilers(run_object, prof_config, verbose=False):
  """Runs profilers on run_object.

  Args:
    run_object: An object (string or tuple) for profiling.
    prof_config: A string with profilers configuration.
    verbose: True if info about running profilers should be shown.

  Returns:
    An ordered dictionary with collected stats.

  Raises:
    AmbiguousConfigurationError: when prof_config is ambiguous.
    BadOptionError: when unknown options are present in configuration.
  """
  if len(set(prof_config)) != len(prof_config):
    # Duplicate option letters make the configuration ambiguous.
    raise AmbiguousConfigurationError(
        'Profiler configuration %s is ambiguous' % prof_config)

  known_options = {option for option, _ in _PROFILERS}
  for option in prof_config:
    if option not in known_options:
      raise BadOptionError('Unknown option: %s' % option)

  run_stats = OrderedDict()
  # Iterate in _PROFILERS order so the output ordering is deterministic.
  for option, profiler_class in _PROFILERS:
    if option not in prof_config:
      continue
    profiler = profiler_class(run_object)
    if verbose:
      print('Running %s...' % profiler.__class__.__name__)
    run_stats[option] = profiler.run()
  return run_stats
327,285
Runs profilers on a function. Args: func: A Python function. options: A string with profilers configuration (i.e. 'cmh'). args: func non-keyword arguments. kwargs: func keyword arguments. host: Host name to send collected data. port: Port number to send collected data. Returns: A result of func execution.
def run(func, options, args=(), kwargs=None, host='localhost', port=8000):
    """Runs profilers on a function and sends collected stats to a server.

    Args:
        func: A Python function.
        options: A string with profilers configuration (i.e. 'cmh').
        args: func non-keyword arguments.
        kwargs: func keyword arguments (None means no keyword arguments).
        host: Host name to send collected data.
        port: Port number to send collected data.
    Returns:
        A result of func execution.
    """
    # Use None as the default instead of {} to avoid the shared
    # mutable-default-argument pitfall.
    if kwargs is None:
        kwargs = {}
    run_stats = run_profilers((func, args, kwargs), options)
    # Every profiler records the same function result; keep the first one
    # even when it is falsy (0, '', None, ...), which the previous
    # `if not result:` check would have re-read from later profilers.
    result = None
    result_captured = False
    for prof in run_stats:
        if not result_captured:
            result = run_stats[prof]['result']
            result_captured = True
        del run_stats[prof]['result']  # Don't send result to remote host
    post_data = gzip.compress(
        json.dumps(run_stats).encode('utf-8'))
    urllib.request.urlopen('http://%s:%s' % (host, port), post_data)
    return result
327,286
Creates a _InjectionContext. Args: injection_site_fn: the initial function being injected into Returns: a new empty _InjectionContext in the default scope
def new(self, injection_site_fn):
    """Creates an empty _InjectionContext rooted at injection_site_fn.

    Args:
        injection_site_fn: the initial function being injected into
    Returns:
        a new empty _InjectionContext in the default (unscoped) scope
    """
    root_context = _InjectionContext(
        injection_site_fn,
        binding_stack=[],
        scope_id=scoping.UNSCOPED,
        is_scope_usable_from_scope_fn=self._is_scope_usable_from_scope_fn)
    return root_context
328,061
Creates a child injection context. A "child" injection context is a context for a binding used to inject something into the current binding's provided value. Args: injection_site_fn: the child function being injected into binding: a Binding Returns: a new _InjectionContext
def get_child(self, injection_site_fn, binding):
    """Creates the injection context for one of this context's dependencies.

    A "child" injection context is a context for a binding used to inject
    something into the current binding's provided value.

    Args:
        injection_site_fn: the child function being injected into
        binding: a Binding
    Returns:
        a new _InjectionContext
    Raises:
        CyclicInjectionError: binding already appears on the binding stack.
        BadDependencyScopeError: binding's scope is unusable from this scope.
    """
    child_scope_id = binding.scope_id
    extended_stack = self._binding_stack + [binding]
    # The same binding appearing twice on the stack means the dependency
    # graph contains a cycle.
    if binding in self._binding_stack:
        raise errors.CyclicInjectionError(extended_stack)
    scope_ok = self._is_scope_usable_from_scope_fn(
        child_scope_id, self._scope_id)
    if not scope_ok:
        raise errors.BadDependencyScopeError(
            self.get_injection_site_desc(), self._scope_id, child_scope_id,
            binding.binding_key)
    return _InjectionContext(
        injection_site_fn, extended_stack, child_scope_id,
        self._is_scope_usable_from_scope_fn)
328,063
Provides an instance of the given class. Args: cls: a class (not an instance) Returns: an instance of cls Raises: Error: an instance of cls is not providable
def provide(self, cls):
    """Provides an instance of the given class.

    Args:
        cls: a class (not an instance)
    Returns:
        an instance of cls
    Raises:
        Error: an instance of cls is not providable
    """
    support.verify_class_type(cls, 'cls')
    if not self._is_injectable_fn(cls):
        # Report the caller's location, not this function's.
        caller_loc = locations.get_back_frame_loc()
        raise errors.NonExplicitlyBoundClassError(caller_loc, cls)
    try:
        return self._obj_provider.provide_class(
            cls,
            self._injection_context_factory.new(cls.__init__),
            direct_init_pargs=[],
            direct_init_kwargs={})
    except errors.Error as e:
        # `raise e` truncates the traceback for readability; a bare
        # `raise` keeps the full chain.
        if self._use_short_stack_traces:
            raise e
        raise
328,072
Determines which args have no arg binding keys. Args: arg_names: a sequence of the names of possibly bound args arg_binding_keys: a sequence of ArgBindingKey each of whose arg names is in arg_names Returns: a sequence of arg names that is a (possibly empty, possibly non-proper) subset of arg_names
def get_unbound_arg_names(arg_names, arg_binding_keys):
    """Determines which args have no arg binding keys.

    Args:
        arg_names: a sequence of the names of possibly bound args
        arg_binding_keys: a sequence of ArgBindingKey each of whose arg
            names is in arg_names
    Returns:
        a sequence of arg names that is a (possibly empty, possibly
            non-proper) subset of arg_names
    """
    # A set gives O(1) membership tests instead of scanning a list for
    # every arg name (the original was O(len(arg_names) * len(keys))).
    bound_arg_names = {abk._arg_name for abk in arg_binding_keys}
    return [arg_name for arg_name in arg_names
            if arg_name not in bound_arg_names]
328,079
Creates an ArgBindingKey. Args: arg_name: the name of the bound arg annotation: an Annotation, or None to create an unannotated arg binding key Returns: a new ArgBindingKey
def new(arg_name, annotated_with=None):
    """Creates an ArgBindingKey.

    Args:
        arg_name: the name of the bound arg
        annotated_with: an annotation value, or None to create an
            unannotated arg binding key
    Returns:
        a new ArgBindingKey
    """
    if arg_name.startswith(_PROVIDE_PREFIX):
        # A 'provide_foo' arg asks for a provider of 'foo' rather than
        # an instance of 'foo' itself.
        key_name = arg_name[_PROVIDE_PREFIX_LEN:]
        indirection = provider_indirections.INDIRECTION
    else:
        key_name = arg_name
        indirection = provider_indirections.NO_INDIRECTION
    return ArgBindingKey(arg_name,
                         binding_keys.new(key_name, annotated_with),
                         indirection)
328,080
Converts normal class names into normal arg names. Normal class names are assumed to be CamelCase with an optional leading underscore. Normal arg names are assumed to be lower_with_underscores. Args: class_name: a class name, e.g., "FooBar" or "_FooBar" Returns: all likely corresponding arg names, e.g., ["foo_bar"]
def default_get_arg_names_from_class_name(class_name):
    """Converts normal class names into normal arg names.

    Normal class names are assumed to be CamelCase with an optional leading
    underscore.  Normal arg names are assumed to be lower_with_underscores.

    Args:
        class_name: a class name, e.g., "FooBar" or "_FooBar"
    Returns:
        all likely corresponding arg names, e.g., ["foo_bar"]
    """
    remaining = class_name[1:] if class_name.startswith('_') else class_name
    # Peel CamelCase words off the front one at a time.  Matching must stay
    # anchored at the start: any non-CamelCase character stops the scan, so
    # e.g. "Foo2Bar" yields only "foo" (as before), not "foo_bar".
    camel_word = re.compile(r'([A-Z][a-z]+)(.*)')
    words = []
    while True:
        match = camel_word.match(remaining)
        if match is None:
            break
        words.append(match.group(1).lower())
        remaining = match.group(2)
    return ['_'.join(words)] if words else []
328,087
Retrieves the provider method-relevant info set by decorators. If any info wasn't set by decorators, then defaults are returned. Args: provider_fn: a (possibly decorated) provider function default_arg_names: the (possibly empty) arg names to use if none were specified via @provides() Returns: a sequence of ProviderDecoration
def get_provider_fn_decorations(provider_fn, default_arg_names):
    """Retrieves the provider method-relevant info set by decorators.

    If any info wasn't set by decorators, then defaults are returned.

    Args:
        provider_fn: a (possibly decorated) provider function
        default_arg_names: the (possibly empty) arg names to use if none
            were specified via @provides()
    Returns:
        a sequence of ProviderDecoration
    """
    decorations = (getattr(provider_fn, _PROVIDER_DECORATIONS_ATTR)
                   if hasattr(provider_fn, _IS_WRAPPER_ATTR) else None)
    if not decorations:
        # Nothing declared via decorators (or an empty list): synthesize a
        # default decoration per default arg name.
        return [ProviderDecoration(default_arg_name,
                                   annotated_with=None,
                                   in_scope_id=scoping.DEFAULT_SCOPE)
                for default_arg_name in default_arg_names]
    expanded = []
    for decoration in decorations:
        # TODO(kurts): seems like default scope should be done at
        # ProviderDecoration instantiation time.
        if decoration.in_scope_id is None:
            decoration.in_scope_id = scoping.DEFAULT_SCOPE
        if decoration.arg_name is not None:
            expanded.append(decoration)
        else:
            # Unnamed decoration: fan out across all default arg names.
            expanded.extend(
                ProviderDecoration(default_arg_name,
                                   decoration.annotated_with,
                                   decoration.in_scope_id)
                for default_arg_name in default_arg_names)
    return expanded
328,112
Creates a BindingKey. Args: arg_name: the name of the bound arg annotation: an Annotation, or None to create an unannotated binding key Returns: a new BindingKey
def new(arg_name, annotated_with=None):
    """Creates a BindingKey.

    Args:
        arg_name: the name of the bound arg
        annotated_with: an annotation value, or None to create an
            unannotated binding key
    Returns:
        a new BindingKey
    """
    annotation = (annotations.NO_ANNOTATION
                  if annotated_with is None
                  else annotations.Annotation(annotated_with))
    return BindingKey(arg_name, annotation)
328,141
Initializer. Args: name: the name of the bound arg annotation: an Annotation
def __init__(self, name, annotation):
    """Initializer.

    Args:
        name: the name of the bound arg
        annotation: an Annotation
    """
    self._annotation = annotation
    self._name = name
328,142
Converts either bytes or unicode to `bytes`, using utf-8 encoding for text. Args: bytes_or_text: A `bytes`, `str`, or `unicode` object. encoding: A string indicating the charset for encoding unicode. Returns: A `bytes` object. Raises: TypeError: If `bytes_or_text` is not a binary or unicode string.
def as_bytes(bytes_or_text, encoding='utf-8'):
    """Converts either bytes or unicode to `bytes`, using utf-8 for text.

    Args:
        bytes_or_text: A `bytes`, `str`, or `unicode` object.
        encoding: A string indicating the charset for encoding unicode.
    Returns:
        A `bytes` object.
    Raises:
        TypeError: If `bytes_or_text` is not a binary or unicode string.
    """
    # The two isinstance checks are mutually exclusive, so their order
    # relative to the original does not change behavior.
    if isinstance(bytes_or_text, bytes):
        return bytes_or_text
    if isinstance(bytes_or_text, _six.text_type):
        return bytes_or_text.encode(encoding)
    raise TypeError('Expected binary or unicode string, got %r' %
                    (bytes_or_text,))
328,168
Returns the given argument as a unicode string. Args: bytes_or_text: A `bytes`, `str`, or `unicode` object. encoding: A string indicating the charset for decoding unicode. Returns: A `unicode` (Python 2) or `str` (Python 3) object. Raises: TypeError: If `bytes_or_text` is not a binary or unicode string.
def as_text(bytes_or_text, encoding='utf-8'):
    """Returns the given argument as a unicode string.

    Args:
        bytes_or_text: A `bytes`, `str`, or `unicode` object.
        encoding: A string indicating the charset for decoding unicode.
    Returns:
        A `unicode` (Python 2) or `str` (Python 3) object.
    Raises:
        TypeError: If `bytes_or_text` is not a binary or unicode string.
    """
    if isinstance(bytes_or_text, _six.text_type):
        return bytes_or_text
    elif isinstance(bytes_or_text, bytes):
        return bytes_or_text.decode(encoding)
    else:
        # Wrap the value in a 1-tuple (as sibling as_bytes does) so that
        # %-formatting can't misinterpret a tuple/dict argument and raise
        # or garble the error message itself.
        raise TypeError('Expected binary or unicode string, got %r' %
                        (bytes_or_text,))
328,169
Initializes the pool. Args: size: size of pool (default 3) **kwargs: arguments for Browser(...)
def __init__(self, size=3, **kwargs):
    """Initializes the pool.

    Args:
        size: size of pool (default 3)
        **kwargs: arguments for Browser(...)
    """
    self.kwargs = kwargs
    self.size = size
    # Guard _in_use against concurrent checkout/checkin.
    self._lock = threading.Lock()
    self._in_use = set()
330,273
Initializes the Browser. Args: **kwargs: arguments for Chrome(...)
def __init__(self, **kwargs):
    """Initializes the Browser.

    Args:
        **kwargs: arguments for Chrome(...)
    """
    self.chrome = Chrome(**kwargs)
    # Websocket plumbing is created later, when the browser starts.
    self.websock = None
    self.websock_url = None
    self.websock_thread = None
    self.is_browsing = False
    self._command_id = Counter()
    self._wait_interval = 0.5
330,287
Starts chrome if it's not running. Args: **kwargs: arguments for self.chrome.start(...)
def start(self, **kwargs):
    """Starts chrome if it's not running.

    Args:
        **kwargs: arguments for self.chrome.start(...)
    """
    if self.is_running():
        return
    self.websock_url = self.chrome.start(**kwargs)
    self.websock = websocket.WebSocketApp(self.websock_url)
    self.websock_thread = WebsockReceiverThread(
        self.websock, name='WebsockThread:%s' % self.chrome.port)
    self.websock_thread.start()
    self._wait_for(lambda: self.websock_thread.is_open, timeout=30)
    # tell browser to send us messages we're interested in
    for method in ('Network.enable',
                   'Page.enable',
                   'Console.enable',
                   'Runtime.enable',
                   'ServiceWorker.enable',
                   'ServiceWorker.setForceUpdateOnPageLoad'):
        self.send_to_chrome(method=method)
    # disable google analytics
    self.send_to_chrome(
        method='Network.setBlockedURLs',
        params={'urls': ['*google-analytics.com/analytics.js',
                         '*google-analytics.com/ga.js']})
330,290
Initializes instance of this class. Doesn't start the browser, start() does that. Args: chrome_exe: filesystem path to chrome/chromium executable port: chrome debugging protocol port (default 9222) ignore_cert_errors: configure chrome to accept all certs (default False)
def __init__(self, chrome_exe, port=9222, ignore_cert_errors=False):
    """Initializes instance of this class.

    Doesn't start the browser, start() does that.

    Args:
        chrome_exe: filesystem path to chrome/chromium executable
        port: chrome debugging protocol port (default 9222)
        ignore_cert_errors: configure chrome to accept all certs
            (default False)
    """
    self.chrome_exe = chrome_exe
    self.ignore_cert_errors = ignore_cert_errors
    self.port = port
    self.chrome_process = None
    # Signaled when the chrome process should shut down.
    self._shutdown = threading.Event()
330,378
Fully describes a lambda function. Args: lambda_function: Name, ARN, or dictionary of lambda function. If dictionary, should likely be the return value from list_functions. At a minimum, must contain a key titled 'FunctionName'. flags: Flags describing which sections should be included in the return value. Default ALL Returns: dictionary describing the requested lambda function.
def get_lambda_function(lambda_function, flags=FLAGS.ALL, **conn):
    """Fully describes a lambda function.

    Args:
        lambda_function: Name, ARN, or dictionary of lambda function. If a
            dictionary, should likely be the return value from
            list_functions. At a minimum, must contain a key titled
            'FunctionName'.
        flags: Flags describing which sections should be included in the
            return value. Default ALL.
        **conn: connection details (may include account_number and region,
            which an ARN in lambda_function overrides).
    Returns:
        dictionary describing the requested lambda function.
    """
    # Python 2 and 3 support:
    try:
        basestring
    except NameError as _:
        basestring = str

    if isinstance(lambda_function, basestring):
        # Plain string: decide whether it's an ARN or a bare function name
        # and normalize to a dict either way.
        parsed = ARN(lambda_function)
        if parsed.error:
            lambda_function = dict(FunctionName=lambda_function)
        else:
            lambda_function = dict(FunctionName=parsed.name,
                                   FunctionArn=lambda_function)

    if 'FunctionArn' in lambda_function:
        # Prefer account/region embedded in the ARN over the conn dict.
        parsed = ARN(lambda_function['FunctionArn'])
        if not parsed.error:
            if parsed.account_number:
                conn['account_number'] = parsed.account_number
            if parsed.region:
                conn['region'] = parsed.region

    return registry.build_out(flags, start_with=lambda_function,
                              pass_datastructure=True, **conn)
331,028