docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Retrieve a BlobInfo by key. Args: blob_key: A blob key. This may be a str, unicode or BlobKey instance. **ctx_options: Context options for Model().get_by_id(). Returns: A BlobInfo entity associated with the provided key. If there was no such entity, returns None.
def get(cls, blob_key, **ctx_options):
    """Retrieve a BlobInfo by key.

    Args:
      blob_key: A blob key; may be a str, unicode or BlobKey instance.
      **ctx_options: Context options for Model().get_by_id().

    Returns:
      The BlobInfo entity associated with the key, or None if absent.
    """
    future = cls.get_async(blob_key, **ctx_options)
    return future.get_result()
770,593
Multi-key version of get(). Args: blob_keys: A list of blob keys. **ctx_options: Context options for Model().get_by_id(). Returns: A list whose items are each either a BlobInfo entity or None.
def get_multi(cls, blob_keys, **ctx_options):
    """Multi-key version of get().

    Args:
      blob_keys: A list of blob keys.
      **ctx_options: Context options for Model().get_by_id().

    Returns:
      A list whose items are each either a BlobInfo entity or None.
    """
    futures = cls.get_multi_async(blob_keys, **ctx_options)
    results = []
    for future in futures:
        results.append(future.get_result())
    return results
770,595
Permanently delete this blob from Blobstore. Args: **options: Options for create_rpc().
def delete(self, **options):
    """Permanently delete this blob from Blobstore.

    Args:
      **options: Options for create_rpc().
    """
    # Fire the async delete and block on its completion.
    delete_async(self.key(), **options).get_result()
770,597
Fills the internal buffer. Args: size: Number of bytes to read. Will be clamped to [self.__buffer_size, MAX_BLOB_FETCH_SIZE].
def __fill_buffer(self, size=0):
    """Fill the internal read buffer from Blobstore.

    Args:
      size: Number of bytes to read.  Clamped to the range
        [self.__buffer_size, MAX_BLOB_FETCH_SIZE].
    """
    read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
    start = self.__position
    # fetch_data takes an inclusive end index, hence the -1.
    self.__buffer = fetch_data(self.__blob_key, start, start + read_size - 1)
    self.__buffer_position = 0
    # A short read means we hit the end of the blob.
    self.__eof = len(self.__buffer) < read_size
770,598
Convert a date to a datetime for Cloud Datastore storage. Args: value: A datetime.date object. Returns: A datetime object with time set to 0:00.
def _date_to_datetime(value): if not isinstance(value, datetime.date): raise TypeError('Cannot convert to datetime expected date value; ' 'received %s' % value) return datetime.datetime(value.year, value.month, value.day)
770,603
Convert a time to a datetime for Cloud Datastore storage. Args: value: A datetime.time object. Returns: A datetime object with date set to 1970-01-01.
def _time_to_datetime(value): if not isinstance(value, datetime.time): raise TypeError('Cannot convert to datetime expected time value; ' 'received %s' % value) return datetime.datetime(1970, 1, 1, value.hour, value.minute, value.second, value.microsecond)
770,604
Decorator to make a function automatically run in a transaction. Args: **ctx_options: Transaction options (see transaction(), but propagation default to TransactionOptions.ALLOWED). This supports two forms: (1) Vanilla: @transactional def callback(arg): ... (2) With options: @transactional(retries=1) def callback(arg): ...
def transactional(func, args, kwds, **options):
    """Decorator making a function automatically run in a transaction.

    Args:
      func: The wrapped callback.
      args: Positional arguments for the callback.
      kwds: Keyword arguments for the callback.
      **options: Transaction options (see transaction(); propagation
        defaults to TransactionOptions.ALLOWED).

    Returns:
      The callback's result, waited on synchronously.
    """
    future = transactional_async.wrapped_decorator(func, args, kwds, **options)
    return future.get_result()
770,605
Constructor. Args: default_model: If an implementation for the kind cannot be found, use this model class. If none is specified, an exception will be thrown (default). id_resolver: A datastore_pbs.IdResolver that can resolve application ids. This is only necessary when running on the Cloud Datastore v1 API.
def __init__(self, default_model=None, id_resolver=None):
    """Constructor.

    Args:
      default_model: If an implementation for the kind cannot be found, use
        this model class.  If none is specified, an exception is thrown
        (default).
      id_resolver: A datastore_pbs.IdResolver that can resolve application
        ids.  Only needed when running on the Cloud Datastore v1 API.
    """
    # TODO(pcostello): Remove this once AbstractAdapter's constructor makes
    # it into production.
    try:
        super(ModelAdapter, self).__init__(id_resolver)
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; only tolerate ordinary failures from older
        # AbstractAdapter signatures that don't accept id_resolver.
        pass
    self.default_model = default_model
    self.want_pbs = 0
770,620
Internal helper for comparison operators. Args: op: The operator ('=', '<' etc.). Returns: A FilterNode instance representing the requested comparison.
def _comparison(self, op, value):
    """Internal helper for comparison operators.

    Args:
      op: The operator ('=', '<' etc.).
      value: The right-hand operand.

    Returns:
      A FilterNode instance representing the requested comparison.

    Raises:
      BadFilterError: if this property is not indexed.
    """
    # NOTE: This is also used by query.gql().
    if not self._indexed:
        raise datastore_errors.BadFilterError(
            'Cannot query for unindexed property %s' % self._name)
    from .query import FilterNode  # Import late to avoid circular imports.
    if value is not None:
        # Validate, then convert to base type, then to datastore type.
        value = self._datastore_type(
            self._call_to_base_type(self._do_validate(value)))
    return FilterNode(self._name, op, value)
770,627
Compute a list of composable methods. Because this is a common operation and the class hierarchy is static, the outcome is cached (assuming that for a particular list of names the reversed flag is either always on, or always off). Args: *names: One or more method names. reverse: Optional flag, default False; if True, the list is reversed. Returns: A list of callable class method objects.
def _find_methods(cls, *names, **kwds):
    """Compute a list of composable methods.

    Because this is a common operation and the class hierarchy is static,
    the outcome is cached per names tuple (assuming the reversed flag is
    always the same for a given tuple).

    Args:
      *names: One or more method names.
      reverse: Optional flag, default False; if True, the list is reversed.

    Returns:
      A list of callable class method objects.
    """
    reverse = kwds.pop('reverse', False)
    assert not kwds, repr(kwds)
    cache = cls.__dict__.get('_find_methods_cache')
    if cache:
        hit = cache.get(names)
        if hit is not None:
            return hit
    else:
        # Install the cache on this exact class (not an ancestor).
        cls._find_methods_cache = cache = {}
    methods = []
    for klass in cls.__mro__:
        for name in names:
            candidate = klass.__dict__.get(name)
            if candidate is not None:
                methods.append(candidate)
    if reverse:
        methods.reverse()
    cache[names] = methods
    return methods
770,639
Internal helper to deserialize this property from a protocol buffer. Subclasses may override this method. Args: entity: The entity, a Model (subclass) instance. p: A Property Message object (a protocol buffer). depth: Optional nesting depth, default 1 (unused here, but used by some subclasses that override this method).
def _deserialize(self, entity, p, unused_depth=1):
    """Internal helper to deserialize this property from a protocol buffer.

    Subclasses may override this method.

    Args:
      entity: The entity, a Model (subclass) instance.
      p: A Property Message object (a protocol buffer).
      unused_depth: Optional nesting depth, default 1 (unused here, but
        used by some subclasses that override this method).
    """
    if p.meaning() == entity_pb.Property.EMPTY_LIST:
        self._store_value(entity, [])
        return
    val = self._db_get_value(p.value(), p)
    if val is not None:
        val = _BaseValue(val)
    # TODO: replace the remainder of the function with the following commented
    # out code once its feasible to make breaking changes such as not calling
    # _store_value().
    # if self._repeated:
    #   entity._values.setdefault(self._name, []).append(val)
    # else:
    #   entity._values[self._name] = val
    if not self._repeated:
        newvalue = val
    elif self._has_value(entity):
        newvalue = self._retrieve_value(entity)
        assert isinstance(newvalue, list), repr(newvalue)
        newvalue.append(val)
    else:
        # Promote single values to lists for repeated properties.
        newvalue = [val]
    self._store_value(entity, newvalue)
770,646
Internal helper to check this property for specific requirements. Called by Model._check_properties(). Args: rest: Optional subproperty to check, of the form 'name1.name2...nameN'. Raises: InvalidPropertyError if this property does not meet the given requirements or if a subproperty is specified. (StructuredProperty overrides this method to handle subproperties.)
def _check_property(self, rest=None, require_indexed=True):
    """Internal helper to check this property for specific requirements.

    Called by Model._check_properties().

    Args:
      rest: Optional subproperty to check, of the form
        'name1.name2...nameN'.
      require_indexed: If True, the property must be indexed.

    Raises:
      InvalidPropertyError: if the property does not meet the given
        requirements or a subproperty is specified.  (StructuredProperty
        overrides this method to handle subproperties.)
    """
    if require_indexed and not self._indexed:
        raise InvalidPropertyError('Property is unindexed %s' % self._name)
    if rest:
        raise InvalidPropertyError('Referencing subproperty %s.%s '
                                   'but %s is not a structured property' %
                                   (self._name, rest, self._name))
770,647
Constructor. Args: func: A function that takes one argument, the model instance, and returns a calculated value.
def __init__(self, func, name=None, indexed=None, repeated=None,
             verbose_name=None):
    """Constructor.

    Args:
      func: A function that takes one argument, the model instance, and
        returns a calculated value.
      name, indexed, repeated, verbose_name: Passed through to the base
        Property constructor.
    """
    super(ComputedProperty, self).__init__(
        name=name, indexed=indexed,
        repeated=repeated, verbose_name=verbose_name)
    self._func = func
770,716
Get the model class for the kind. Args: kind: A string representing the name of the kind to lookup. default_model: The model class to use if the kind can't be found. Returns: The model class for the requested kind. Raises: KindError: The kind was not found and no default_model was provided.
def _lookup_model(cls, kind, default_model=None):
    """Get the model class for the kind.

    Args:
      kind: A string representing the name of the kind to lookup.
      default_model: The model class to use if the kind can't be found.

    Returns:
      The model class for the requested kind.

    Raises:
      KindError: The kind was not found and no default_model was provided.
    """
    modelclass = cls._kind_map.get(kind, default_model)
    if modelclass is None:
        raise KindError(
            "No model class found for kind '%s'. Did you forget to import it?"
            % kind)
    return modelclass
770,728
Return a dict containing the entity's property values. Args: include: Optional set of property names to include, default all. exclude: Optional set of property names to skip, default none. A name contained in both include and exclude is excluded.
def _to_dict(self, include=None, exclude=None):
    """Return a dict containing the entity's property values.

    Args:
      include: Optional set of property names to include, default all.
      exclude: Optional set of property names to skip, default none.
        A name contained in both include and exclude is excluded.

    Raises:
      TypeError: if include/exclude is not a list, tuple or set.
    """
    if (include is not None and
            not isinstance(include, (list, tuple, set, frozenset))):
        raise TypeError('include should be a list, tuple or set')
    if (exclude is not None and
            not isinstance(exclude, (list, tuple, set, frozenset))):
        raise TypeError('exclude should be a list, tuple or set')
    result = {}
    for prop in self._properties.itervalues():
        name = prop._code_name
        if include is not None and name not in include:
            continue
        if exclude is not None and name in exclude:
            continue
        try:
            result[name] = prop._get_for_dict(self)
        except UnprojectedPropertyError:
            pass  # Ignore unprojected properties rather than failing.
    return result
770,738
Internal helper to check the given properties exist and meet specified requirements. Called from query.py. Args: property_names: List or tuple of property names -- each being a string, possibly containing dots (to address subproperties of structured properties). Raises: InvalidPropertyError if one of the properties is invalid. AssertionError if the argument is not a list or tuple of strings.
def _check_properties(cls, property_names, require_indexed=True):
    """Internal helper to check that properties exist and meet requirements.

    Called from query.py.

    Args:
      property_names: List or tuple of property names -- each being a
        string, possibly containing dots (to address subproperties of
        structured properties).
      require_indexed: If True, require the properties to be indexed.

    Raises:
      InvalidPropertyError: if one of the properties is invalid.
      AssertionError: if the argument is not a list or tuple of strings.
    """
    assert isinstance(property_names, (list, tuple)), repr(property_names)
    for name in property_names:
        assert isinstance(name, basestring), repr(name)
        rest = None
        if '.' in name:
            # Split off the first path component; the rest is checked by
            # the property itself (structured properties recurse).
            name, rest = name.split('.', 1)
        prop = cls._properties.get(name)
        if prop is None:
            cls._unknown_property(name)
        else:
            prop._check_property(rest, require_indexed=require_indexed)
770,741
Create a Query object for this class. Args: distinct: Optional bool, short hand for group_by = projection. *args: Used to apply an initial filter **kwds: are passed to the Query() constructor. Returns: A Query object.
def _query(cls, *args, **kwds):
    """Create a Query object for this class.

    Args:
      distinct: Optional bool, shorthand for group_by = projection.
      *args: Used to apply an initial filter.
      **kwds: Passed to the Query() constructor.

    Returns:
      A Query object.
    """
    # Validate the distinct= shorthand before constructing the query.
    if 'distinct' in kwds:
        if 'group_by' in kwds:
            raise TypeError(
                'cannot use distinct= and group_by= at the same time')
        projection = kwds.get('projection')
        if not projection:
            raise TypeError(
                'cannot use distinct= without projection=')
        if kwds.pop('distinct'):
            kwds['group_by'] = projection
    # TODO: Disallow non-empty args and filter=.
    from .query import Query  # Import late to avoid circular imports.
    qry = Query(kind=cls._get_kind(), **kwds)
    qry = qry.filter(*cls._default_filters())
    return qry.filter(*args)
770,742
Allocates a range of key IDs for this model class. Args: size: Number of IDs to allocate. Either size or max can be specified, not both. max: Maximum ID to allocate. Either size or max can be specified, not both. parent: Parent key for which the IDs will be allocated. **ctx_options: Context options. Returns: A tuple with (start, end) for the allocated range, inclusive.
def _allocate_ids(cls, size=None, max=None, parent=None, **ctx_options):
    """Allocate a range of key IDs for this model class.

    Args:
      size: Number of IDs to allocate.  Either size or max may be
        specified, not both.
      max: Maximum ID to allocate.  Either size or max may be specified,
        not both.
      parent: Parent key for which the IDs will be allocated.
      **ctx_options: Context options.

    Returns:
      A tuple (start, end) for the allocated range, inclusive.
    """
    future = cls._allocate_ids_async(size=size, max=max, parent=parent,
                                     **ctx_options)
    return future.get_result()
770,747
Returns an instance of Model class by ID. This is really just a shorthand for Key(cls, id, ...).get(). Args: id: A string or integer key ID. parent: Optional parent key of the model to get. namespace: Optional namespace. app: Optional app ID. **ctx_options: Context options. Returns: A model instance or None if not found.
def _get_by_id(cls, id, parent=None, **ctx_options):
    """Return an instance of this Model class by ID.

    Shorthand for Key(cls, id, ...).get().

    Args:
      id: A string or integer key ID.
      parent: Optional parent key of the model to get.
      **ctx_options: Context options.

    Returns:
      A model instance or None if not found.
    """
    future = cls._get_by_id_async(id, parent=parent, **ctx_options)
    return future.get_result()
770,749
Checks whether a specific hook is in its default state. Args: default_hook: Callable specified by ndb internally (do not override). hook: The hook defined by a model class using _post_*_hook. Raises: TypeError if either the default hook or the tested hook are not callable.
def _is_default_hook(default_hook, hook):
    """Check whether a specific hook is in its default state.

    Args:
      default_hook: Callable specified by ndb internally (do not override).
      hook: The hook defined by a model class using _post_*_hook.

    Returns:
      True if the underlying functions are the same object.
      (Python 2: compares unbound-method im_func attributes.)

    Raises:
      TypeError: if either hook is not callable.
    """
    if not hasattr(default_hook, '__call__'):
        raise TypeError('Default hooks for ndb.model.Model must be callable')
    if not hasattr(hook, '__call__'):
        raise TypeError('Hooks must be callable')
    return default_hook.im_func is hook.im_func
770,751
Recursive helper for _to_base_type() to convert a message to an entity. Args: msg: A Message instance. modelclass: A Model subclass. Returns: An instance of modelclass.
def _message_to_entity(msg, modelclass):
    """Recursive helper for _to_base_type() converting a message to an entity.

    Args:
      msg: A Message instance.
      modelclass: A Model subclass.

    Returns:
      An instance of modelclass.
    """
    ent = modelclass()
    for prop_name, prop in modelclass._properties.iteritems():
        if prop._code_name == 'blob_':  # TODO: Devise a cleaner test.
            continue  # That's taken care of later.
        value = getattr(msg, prop_name)
        if value is not None and isinstance(prop, model.StructuredProperty):
            # Recurse into nested structured values (repeated or single).
            if prop._repeated:
                value = [_message_to_entity(v, prop._modelclass)
                         for v in value]
            else:
                value = _message_to_entity(value, prop._modelclass)
        setattr(ent, prop_name, value)
    return ent
770,844
Recursive helper for _from_base_type() to convert an entity to a message. Args: ent: A Model instance. message_type: A Message subclass. Returns: An instance of message_type.
def _projected_entity_to_message(ent, message_type):
    """Recursive helper for _from_base_type() converting an entity to a message.

    Args:
      ent: A Model instance.
      message_type: A Message subclass.

    Returns:
      An instance of message_type.
    """
    msg = message_type()
    analyzed = _analyze_indexed_fields(ent._projection)
    for name, sublist in analyzed.iteritems():
        prop = ent._properties[name]
        val = prop._get_value(ent)
        # A non-empty sublist must correspond to a StructuredProperty.
        assert isinstance(prop, model.StructuredProperty) == bool(sublist)
        if sublist:
            field = message_type.field_by_name(name)
            assert isinstance(field, messages.MessageField)
            assert prop._repeated == field.repeated
            if prop._repeated:
                assert isinstance(val, list)
                val = [_projected_entity_to_message(v, field.type)
                       for v in val]
            else:
                assert isinstance(val, prop._modelclass)
                val = _projected_entity_to_message(val, field.type)
        setattr(msg, name, val)
    return msg
770,845
Constructor. Args: enum_type: A subclass of protorpc.messages.Enum. name: Optional datastore name (defaults to the property name). Additional keywords arguments specify the same options as supported by IntegerProperty.
def __init__(self, enum_type, name=None, default=None, choices=None, **kwds):
    """Constructor.

    Args:
      enum_type: A subclass of protorpc.messages.Enum.
      name: Optional datastore name (defaults to the property name).
      default: Optional default value, validated here.
      choices: Optional iterable of allowed values, each validated here.

    Additional keyword arguments specify the same options as supported by
    IntegerProperty.
    """
    self._enum_type = enum_type
    if default is not None:
        self._validate(default)
    if choices is not None:
        # A plain loop, not map(): under Python 3 map() is lazy, so the
        # original map(self._validate, choices) silently skipped validation.
        for choice in choices:
            self._validate(choice)
    super(EnumProperty, self).__init__(name, default=default, choices=choices,
                                       **kwds)
770,846
Constructor. Args: message_type: A subclass of protorpc.messages.Message. name: Optional datastore name (defaults to the property name). indexed_fields: Optional list of dotted and undotted field names. protocol: Optional protocol name default 'protobuf'. Additional keywords arguments specify the same options as supported by StructuredProperty, except 'indexed'.
def __init__(self, message_type, name=None, indexed_fields=None,
             protocol=None, **kwds):
    """Constructor.

    Args:
      message_type: A subclass of protorpc.messages.Message.
      name: Optional datastore name (defaults to the property name).
      indexed_fields: Optional list of dotted and undotted field names.
      protocol: Optional protocol name, default 'protobuf'.

    Additional keyword arguments specify the same options as supported by
    StructuredProperty, except 'indexed'.
    """
    if not (isinstance(message_type, type) and
            issubclass(message_type, messages.Message)):
        raise TypeError('MessageProperty argument must be a Message subclass')
    self._message_type = message_type
    if indexed_fields is not None:
        # TODO: Check they are all strings naming fields.
        self._indexed_fields = tuple(indexed_fields)
    # NOTE: Otherwise the class default i.e. (), prevails.
    if protocol is None:
        protocol = _default_protocol
    self._protocol = protocol
    self._protocol_impl = _protocols_registry.lookup_by_name(protocol)
    blob_prop = model.BlobProperty('__%s__' % self._protocol)
    # TODO: Solve this without reserving 'blob_'.
    message_class = _make_model_class(message_type, self._indexed_fields,
                                      blob_=blob_prop)
    super(MessageProperty, self).__init__(message_class, name, **kwds)
770,848
Returns all possible paths to the root node Each path includes the term given. The order of the path is top -> bottom, i.e. it starts with the root and ends with the given term (inclusively). Parameters: ----------- - term: the id of the GO term, where the paths begin (i.e. the accession 'GO:0003682') Returns: -------- - a list of lists of GO Terms
def paths_to_top(self, term):
    """Return all possible paths from the root to the given term.

    Each path includes the term itself and is ordered top -> bottom,
    i.e. it starts with the root and ends with the given term.

    Args:
      term: the id of the GO term where the paths begin
        (e.g. the accession 'GO:0003682').

    Returns:
      A list of lists of GO Terms, or None if the term is unknown
      (a message is printed to stderr in that case).
    """
    # Error handling consistent with original authors.
    if term not in self:
        print("Term %s not found!" % term, file=sys.stderr)
        return

    def _climb(rec):
        # A level-0 record is a root: the only path is the record itself.
        if rec.level == 0:
            return [[rec]]
        paths = []
        for parent in rec.parents:
            for path in _climb(parent):
                path.append(rec)
                paths.append(path)
        return paths

    return _climb(self[term])
771,423
Find the necessary file for the given test case. Args: profile(str): device profile for which to find the file filename(str): file to find path(str): where to find it relative to where the module is installed
def find_yang_file(profile, filename, path):
    """Find a mapping file for the given profile.

    Args:
        profile(str): device profile to look under.
        filename(str): file to find.
        path(str): where to find it, relative to where the module is
            installed.

    Returns:
        The full path to the file.

    Raises:
        IOError: if the file does not exist (also logged as an error).
    """
    # Resolve relative to this module's install directory.
    module_dir = os.path.dirname(__file__)
    full_path = os.path.join(module_dir, "mappings", profile, path, filename)
    if not os.path.exists(full_path):
        msg = "Couldn't find parsing file: {}".format(full_path)
        logger.error(msg)
        raise IOError(msg)
    return full_path
772,348
修改语音通知模版 注意:模板成功修改之后需要重新审核才能使用!同时提醒您如果修改了变量,务必重新测试,以免替换出错! 参数: 参数名 类型 是否必须 描述 示例 apikey String 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526 tpl_id Long 是 模板id,64位长整形。指定id时返回id对应的模板。未指定时返回所有模板 9527 tpl_content String 是 模板内容 您的验证码是#code# Args: param: Results: Result
def update_voice_notify(self, param, must=None):
    """Update a voice-notification template.

    Note: after a successful update the template must pass review again
    before it can be used; if variables changed, re-test to avoid
    substitution errors.

    Args:
        param: request parameters (apikey, tpl_id, tpl_content).
        must: required parameter keys; defaults to
            [APIKEY, TPL_ID, TPL_CONTENT].

    Returns:
        Result
    """
    # Build the default per call instead of using a mutable default argument.
    if must is None:
        must = [APIKEY, TPL_ID, TPL_CONTENT]
    r = self.verify_param(param, must)
    if not r.is_succ():
        return r
    h = CommonResultHandler(lambda rsp: {VERSION_V2: rsp}[self.version()])
    return self.path('update_voice_notify.json').post(param, h, r)
775,532
查账户信息 参数名 类型 是否必须 描述 示例 apikey String 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526 Args: param: (Optional) Results: Result
def get(self, param=None, must=None):
    """Query account information.

    Args:
        param: (Optional) request parameters (apikey).
        must: required parameter keys; defaults to [APIKEY].

    Returns:
        Result
    """
    # Build the default per call instead of using a mutable default argument.
    if must is None:
        must = [APIKEY]
    param = {} if param is None else param
    r = self.verify_param(param, must)
    if not r.is_succ():
        return r
    handle = CommonResultHandler(
        lambda rsp: {VERSION_V1: rsp.get(USER),
                     VERSION_V2: rsp}[self.version()])
    return self.path('get.json').post(param, handle, r)
775,540
sepr.join(urlencode(seq)) Args: seq: string list to be urlencoded sepr: join seq with sepr Returns: str
def urlEncodeAndJoin(self, seq, sepr=','):
    """Return sepr.join(urlencode(x) for x in seq).

    Args:
        seq: string list to be urlencoded.
        sepr: separator used to join the encoded items.

    Returns:
        str
    """
    try:
        # Python 3: quote_plus accepts an encoding argument.
        from urllib.parse import quote_plus as encode
        return sepr.join([encode(item, encoding=CHARSET_UTF8)
                          for item in seq])
    except ImportError:
        # Python 2 fallback.
        from urllib import quote as encode
        return sepr.join([encode(item) for item in seq])
775,563
获取回复短信 参数名 类型 是否必须 描述 示例 apikey String 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526 page_size Integer 否 每页个数,最大100个,默认20个 20 Args: param: Results: Result
def pull_reply(self, param=None, must=None):
    """Pull SMS reply messages.

    Args:
        param: (Optional) request parameters (apikey, page_size).
        must: required parameter keys; defaults to [APIKEY].

    Returns:
        Result
    """
    # Build the default per call instead of using a mutable default argument.
    if must is None:
        must = [APIKEY]
    param = {} if param is None else param
    r = self.verify_param(param, must)
    if not r.is_succ():
        return r
    h = CommonResultHandler(
        lambda rsp: {VERSION_V1: rsp[SMS_REPLY] if SMS_REPLY in rsp else None,
                     VERSION_V2: rsp}[self.version()])
    return self.path('pull_reply.json').post(param, h, r)
775,568
查短信发送记录 参数名 类型 是否必须 描述 示例 apikey String 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526 mobile String 否 需要查询的手机号 15205201314 start_time String 是 短信发送开始时间 2013-08-11 00:00:00 end_time String 是 短信发送结束时间 2013-08-12 00:00:00 page_num Integer 否 页码,默认值为1 1 page_size Integer 否 每页个数,最大100个 20 Args: param: Results: Result
def get_record(self, param, must=None):
    """Query SMS sending records.

    Args:
        param: request parameters (apikey, start_time, end_time; optional
            mobile, page_num, page_size).
        must: required parameter keys; defaults to
            [APIKEY, START_TIME, END_TIME].

    Returns:
        Result
    """
    # Build the default per call instead of using a mutable default argument.
    if must is None:
        must = [APIKEY, START_TIME, END_TIME]
    r = self.verify_param(param, must)
    if not r.is_succ():
        return r
    h = CommonResultHandler(
        lambda rsp: {VERSION_V1: rsp[SMS] if SMS in rsp else None,
                     VERSION_V2: rsp}[self.version()])
    return self.path('get_record.json').post(param, h, r)
775,570
统计短信条数 参数名 类型 是否必须 描述 示例 apikey String 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526 start_time String 是 短信发送开始时间 2013-08-11 00:00:00 end_time String 是 短信发送结束时间 2013-08-12 00:00:00 mobile String 否 需要查询的手机号 15205201314 page_num Integer 否 页码,默认值为1 1 page_size Integer 否 每页个数,最大100个 20 Args: param: Results: Result
def count(self, param, must=None):
    """Count sent SMS messages.

    Args:
        param: request parameters (apikey, start_time, end_time; optional
            mobile, page_num, page_size).
        must: required parameter keys; defaults to
            [APIKEY, START_TIME, END_TIME].

    Returns:
        Result
    """
    # Build the default per call instead of using a mutable default argument.
    if must is None:
        must = [APIKEY, START_TIME, END_TIME]
    r = self.verify_param(param, must)
    if not r.is_succ():
        return r
    h = CommonResultHandler(
        lambda rsp: int(rsp[TOTAL]) if TOTAL in rsp else 0)
    return self.path('count.json').post(param, h, r)
775,571
查询流量包 参数名 类型 是否必须 描述 示例 apikey String 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526 carrier String 否 运营商ID 传入该参数则获取指定运营商的流量包, 否则获取所有运营商的流量包 移动:10086 联通:10010 电信:10000 Args: param: Results: Result
def get_package(self, param=None, must=None):
    """Query data-flow packages.

    Args:
        param: (Optional) request parameters (apikey; optional carrier to
            restrict results to one carrier).
        must: required parameter keys; defaults to [APIKEY].

    Returns:
        Result
    """
    # Build the default per call instead of using a mutable default argument.
    if must is None:
        must = [APIKEY]
    param = {} if param is None else param
    r = self.verify_param(param, must)
    if not r.is_succ():
        return r
    h = CommonResultHandler(
        lambda rsp: {VERSION_V1: rsp[FLOW_PACKAGE]
                     if FLOW_PACKAGE in rsp else None,
                     VERSION_V2: rsp}[self.version()])
    return self.path('get_package.json').post(param, h, r)
775,576
充值流量 参数名 类型 是否必须 描述 示例 apikey String 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526 mobile String 是 接收的手机号(仅支持大陆号码) 15205201314 sn String 是 流量包的唯一ID 点击查看 1008601 callback_url String 否 本条流量充值的状态报告推送地址 http://your_receive_url_address encrypt String 否 加密方式 使用加密 tea (不再使用) _sign String 否 签名字段 参考使用加密 393d079e0a00912335adfe46f4a2e10f (不再使用) Args: param: Results: Result
def recharge(self, param, must=None):
    """Recharge a mobile data-flow package.

    Args:
        param: request parameters (apikey, mobile, sn; optional
            callback_url, encrypt, _sign).
        must: required parameter keys; defaults to [APIKEY, MOBILE, SN].

    Returns:
        Result
    """
    # Build the default per call instead of using a mutable default argument.
    if must is None:
        must = [APIKEY, MOBILE, SN]
    r = self.verify_param(param, must)
    if not r.is_succ():
        return r
    h = CommonResultHandler(
        lambda rsp: {VERSION_V1: rsp[RESULT] if RESULT in rsp else None,
                     VERSION_V2: rsp}[self.version()])
    return self.path('recharge.json').post(param, h, r)
775,577
获取状态报告 参数名 是否必须 描述 示例 apikey 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526 page_size 否 每页个数,最大100个,默认20个 20 Args: param: Results: Result
def pull_status(self, param=None, must=None):
    """Pull flow status reports.

    Args:
        param: (Optional) request parameters (apikey; optional page_size).
        must: required parameter keys; defaults to [APIKEY].

    Returns:
        Result
    """
    # Build the default per call instead of using a mutable default argument.
    if must is None:
        must = [APIKEY]
    param = {} if param is None else param
    r = self.verify_param(param, must)
    if not r.is_succ():
        return r
    h = CommonResultHandler(
        lambda rsp: {VERSION_V1: rsp[FLOW_STATUS]
                     if FLOW_STATUS in rsp else None,
                     VERSION_V2: rsp}[self.version()])
    return self.path('pull_status.json').post(param, h, r)
775,578
Initialize the ReST API HTTP wrapper object. Arguments: base_url -- Base URL for requests. Ex: http://example.com/stuff/ user -- Optional user name for basic auth. password -- Optional password for basic auth. ssl_verify -- Set to False to disable SSL verification (not secure). debug_print -- Enable debug print statements. timeout -- Number of seconds to wait for a response.
def __init__(self, base_url, user=None, password=None, ssl_verify=True,
             debug_print=False, timeout=None):
    """Initialize the ReST API HTTP wrapper object.

    Arguments:
    base_url    -- Base URL for requests. Ex: http://example.com/stuff/
    user        -- Optional user name for basic auth.
    password    -- Optional password for basic auth.
    ssl_verify  -- Set to False to disable SSL verification (not secure).
    debug_print -- Enable debug print statements.
    timeout     -- Number of seconds to wait for a response.
    """
    self._base_url = base_url.strip('/')
    self._base_headers = {'Accept': 'application/json'}
    self._user = user
    self._password = password
    self._verify = ssl_verify
    self._dbg_print = debug_print
    # Falsy timeout (None or 0) normalizes to None.
    self._timeout = timeout if timeout else None
    # Authenticated API: attach an HTTP basic-auth header.
    if user and password:
        # NOTE(review): base64.encodestring is Python 2-only (removed in
        # Python 3.9); confirm target interpreter before porting.
        b64string = base64.encodestring('%s:%s' % (user, password))[:-1]
        self._base_headers["Authorization"] = "Basic %s" % b64string
776,364
Delete the specified object. Arguments: handle -- Handle of object to delete.
def do_stc_delete(self, handle):
    """Delete the specified object.

    Arguments:
    handle -- Handle of object to delete.
    """
    if self._not_joined():
        return
    if not handle:
        print('missing object handle')
        return
    try:
        self._stc.delete(handle)
    except resthttp.RestHttpError as e:
        print(e)
    else:
        print('OK')
776,424
Get information on session. If session_id is None, the default, then return information about this session. If a session ID is given, then get information about that session. Arguments: session_id -- Id of session to get info for, if not this session. Return: Dictionary of session information.
def session_info(self, session_id=None):
    """Get information on a session.

    If session_id is None (the default), return information about this
    session; otherwise about the given session.

    Arguments:
    session_id -- Id of session to get info for, if not this session.

    Return:
    Dictionary of session information.
    """
    if not session_id:
        if not self.started():
            # NOTE(review): returns [] here although the docstring promises
            # a dict -- confirm callers tolerate the empty list.
            return []
        session_id = self._sid
    _status, data = self._rest.get_request('sessions', session_id)
    return data
776,455
Create a new automation object. Arguments: object_type -- Type of object to create. under -- Handle of the parent of the new object. attributes -- Dictionary of attributes (name-value pairs). kwattrs -- Optional keyword attributes (name=value pairs). Return: Handle of newly created object.
def create(self, object_type, under=None, attributes=None, **kwattrs):
    """Create a new automation object.

    Arguments:
    object_type -- Type of object to create.
    under       -- Handle of the parent of the new object.
    attributes  -- Dictionary of attributes (name-value pairs).
    kwattrs     -- Optional keyword attributes (name=value pairs).

    Return:
    Handle of newly created object.
    """
    # Delegate to createx() and extract just the handle.
    return self.createx(object_type, under, attributes, **kwattrs)['handle']
776,459
Create a new automation object. Arguments: object_type -- Type of object to create. under -- Handle of the parent of the new object. attributes -- Dictionary of attributes (name-value pairs). kwattrs -- Optional keyword attributes (name=value pairs). Return: Dictionary containing handle of newly created object.
def createx(self, object_type, under=None, attributes=None, **kwattrs):
    """Create a new automation object.

    Arguments:
    object_type -- Type of object to create.
    under       -- Handle of the parent of the new object.
    attributes  -- Dictionary of attributes (name-value pairs).
    kwattrs     -- Optional keyword attributes (name=value pairs).

    Return:
    Dictionary containing handle of newly created object.
    """
    self._check_session()
    params = {'object_type': object_type}
    if under:
        params['under'] = under
    # Merge dict attributes first, then keyword attributes.
    for extra in (attributes, kwattrs):
        if extra:
            params.update(extra)
    _status, data = self._rest.post_request('objects', None, params)
    return data
776,460
Delete the specified object. Arguments: handle -- Handle of object to delete.
def delete(self, handle):
    """Delete the specified object.

    Arguments:
    handle -- Handle of object to delete.
    """
    self._check_session()
    self._rest.delete_request('objects', str(handle))
776,461
Execute a command. Arguments can be supplied either as a dictionary or as keyword arguments. Examples: stc.perform('LoadFromXml', {'filename':'config.xml'}) stc.perform('LoadFromXml', filename='config.xml') Arguments: command -- Command to execute. params -- Optional. Dictionary of parameters (name-value pairs). kwargs -- Optional keyword arguments (name=value pairs). Return: Data from command.
def perform(self, command, params=None, **kwargs):
    """Execute a command.

    Arguments can be supplied either as a dictionary or as keyword
    arguments.  Examples:
        stc.perform('LoadFromXml', {'filename': 'config.xml'})
        stc.perform('LoadFromXml', filename='config.xml')

    Arguments:
    command -- Command to execute.
    params  -- Optional. Dictionary of parameters (name-value pairs).
    kwargs  -- Optional keyword arguments (name=value pairs).

    Return:
    Data from command.
    """
    self._check_session()
    # NOTE: a caller-supplied params dict is mutated in place
    # (kwargs merged, 'command' key added) -- original behavior.
    if not params:
        params = {}
    if kwargs:
        params.update(kwargs)
    params['command'] = command
    _status, data = self._rest.post_request('perform', None, params)
    return data
776,462
Sets or modifies one or more object attributes or relations. Arguments can be supplied either as a dictionary or as keyword arguments. Examples: stc.config('port1', location='//10.1.2.3/1/1') stc.config('port2', {'location': '//10.1.2.3/1/2'}) Arguments: handle -- Handle of object to modify. attributes -- Dictionary of attributes (name-value pairs). kwattrs -- Optional keyword attributes (name=value pairs).
def config(self, handle, attributes=None, **kwattrs):
    """Set or modify one or more object attributes or relations.

    Arguments can be supplied either as a dictionary or as keyword
    arguments.  Examples:
        stc.config('port1', location='//10.1.2.3/1/1')
        stc.config('port2', {'location': '//10.1.2.3/1/2'})

    Arguments:
    handle     -- Handle of object to modify.
    attributes -- Dictionary of attributes (name-value pairs).
    kwattrs    -- Optional keyword attributes (name=value pairs).
    """
    self._check_session()
    if kwattrs:
        if attributes:
            # NOTE: mutates the caller-supplied attributes dict in place.
            attributes.update(kwattrs)
        else:
            attributes = kwattrs
    self._rest.put_request('objects', str(handle), attributes)
776,463
Establish connection to one or more chassis. Arguments: chassis_list -- List of chassis (IP addresses or DNS names) Return: List of chassis addresses.
def connect(self, chassis_list):
    """Establish connection to one or more chassis.

    Arguments:
    chassis_list -- List of chassis (IP addresses or DNS names),
                    or a single chassis value.

    Return:
    List of chassis addresses.
    """
    self._check_session()
    if not isinstance(chassis_list, (list, tuple, set, dict, frozenset)):
        chassis_list = (chassis_list,)
    if len(chassis_list) == 1:
        # Single chassis: PUT directly to the connection resource.
        _status, data = self._rest.put_request(
            'connections', chassis_list[0])
        data = [data]
    else:
        # Multiple chassis: POST a bulk connect action.
        params = dict.fromkeys(chassis_list, True)
        params['action'] = 'connect'
        _status, data = self._rest.post_request('connections', None, params)
    return data
776,468
Remove connection with one or more chassis. Arguments: chassis_list -- List of chassis (IP addresses or DNS names)
def disconnect(self, chassis_list):
    """Remove connection with one or more chassis.

    Arguments:
    chassis_list -- List of chassis (IP addresses or DNS names),
                    or a single chassis value.
    """
    self._check_session()
    if not isinstance(chassis_list, (list, tuple, set, dict, frozenset)):
        chassis_list = (chassis_list,)
    if len(chassis_list) == 1:
        # Single chassis: DELETE the connection resource directly.
        self._rest.delete_request('connections', chassis_list[0])
    else:
        # Multiple chassis: POST a bulk disconnect action.
        params = dict.fromkeys(chassis_list, True)
        params['action'] = 'disconnect'
        self._rest.post_request('connections', None, params)
776,469
Write a diagnostic message to a log file or to standard output. Arguments: level -- Severity level of entry. One of: INFO, WARN, ERROR, FATAL. msg -- Message to write to log.
def log(self, level, msg):
    """Write a diagnostic message to a log file or to standard output.

    Arguments:
    level -- Severity level of entry. One of: INFO, WARN, ERROR, FATAL.
    msg   -- Message to write to log.

    Raises:
    ValueError -- if level is not one of the allowed values.
    """
    self._check_session()
    level = level.upper()
    allowed_levels = ('INFO', 'WARN', 'ERROR', 'FATAL')
    if level not in allowed_levels:
        raise ValueError('level must be one of: ' + ', '.join(allowed_levels))
    # level is already uppercased above; the original called .upper() twice.
    self._rest.post_request(
        'log', None, {'log_level': level, 'message': msg})
776,471
Download the specified file from the server. Arguments: file_name -- Name of file resource to save. save_as -- Optional path name to write file to. If not specified, then file named by the last part of the resource path is downloaded to current directory. Return: (save_path, bytes) save_path -- Path where downloaded file was saved. bytes -- Bytes downloaded.
def download(self, file_name, save_as=None):
    """Download the specified file from the server.

    Arguments:
    file_name -- Name of file resource to save.
    save_as   -- Optional path name to write file to. If not specified,
                 the file named by the last part of the resource path is
                 downloaded to the current directory.

    Return: (save_path, num_bytes)
    save_path -- Path where downloaded file was saved.
    num_bytes -- Bytes downloaded.
    """
    self._check_session()
    try:
        if save_as:
            save_as = os.path.normpath(save_as)
            save_dir = os.path.dirname(save_as)
            if save_dir:
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                elif not os.path.isdir(save_dir):
                    raise RuntimeError(save_dir + " is not a directory")
        # Renamed local from `bytes` to avoid shadowing the builtin.
        _status, save_path, num_bytes = self._rest.download_file(
            'files', file_name, save_as, 'application/octet-stream')
    except resthttp.RestHttpError as e:
        raise RuntimeError('failed to download "%s": %s' % (file_name, e))
    return save_path, num_bytes
776,472
Download all available files. Arguments: dst_dir -- Optional destination directory to write files to. If not specified, then files are downloaded current directory. Return: Dictionary of {file_name: file_size, ..}
def download_all(self, dst_dir=None):
    """Download all available files.

    Arguments:
    dst_dir -- Optional destination directory to write files to. If not
               specified, files are downloaded to the current directory.

    Return:
    Dictionary of {file_name: file_size, ..}
    """
    saved = {}
    for remote in self.files():
        # Save under the remote file's base name when a directory is given.
        target = (os.path.join(dst_dir, remote.split('/')[-1])
                  if dst_dir else None)
        name, num_bytes = self.download(remote, target)
        saved[name] = num_bytes
    return saved
776,473
Wait until sequencer is finished. This method blocks your application until the sequencer has completed its operation. It returns once the sequencer has finished. Arguments: timeout -- Optional. Seconds to wait for sequencer to finish. If this time is exceeded, then an exception is raised. Return: Sequencer testState value.
def wait_until_complete(self, timeout=None):
    """Wait until the sequencer is finished.

    Blocks until the sequencer has completed its operation.

    Arguments:
    timeout -- Optional. Seconds to wait for the sequencer to finish. If
               exceeded, a RuntimeError is raised.

    Return:
    Sequencer testState value.
    """
    deadline = time.time() + int(timeout) if timeout else None
    sequencer = self.get('system1', 'children-sequencer')
    while True:
        state = self.get(sequencer, 'state')
        # The sequencer is done when it pauses or goes idle.
        if 'PAUSE' in state or 'IDLE' in state:
            break
        time.sleep(2)
        if deadline and time.time() >= deadline:
            raise RuntimeError('wait_until_complete timed out after %s sec'
                               % timeout)
    return self.get(sequencer, 'testState')
776,475
Parse a version string into a Version() object. Args: version_string (str), the version string to parse partial (bool), whether to accept incomplete input coerce (bool), whether to try to map the passed in string into a valid Version.
def parse(cls, version_string, partial=False, coerce=False):
    """Parse a version string into version component fields.

    Args:
        version_string (str), the version string to parse
        partial (bool), whether to accept incomplete input
        coerce (bool), whether to try to map the passed in string into a
            valid Version.

    Returns:
        A (major, minor, patch, prerelease, build) tuple.

    Raises:
        ValueError: on empty or malformed input.
    """
    if not version_string:
        raise ValueError('Invalid empty version string: %r' % version_string)

    version_re = cls.partial_version_re if partial else cls.version_re
    match = version_re.match(version_string)
    if not match:
        raise ValueError('Invalid version string: %r' % version_string)

    major, minor, patch, prerelease, build = match.groups()

    # Numeric components must not carry leading zeroes.
    if _has_leading_zero(major):
        raise ValueError("Invalid leading zero in major: %r" % version_string)
    if _has_leading_zero(minor):
        raise ValueError("Invalid leading zero in minor: %r" % version_string)
    if _has_leading_zero(patch):
        raise ValueError("Invalid leading zero in patch: %r" % version_string)

    major = int(major)
    minor = cls._coerce(minor, partial)
    patch = cls._coerce(patch, partial)

    if prerelease is None:
        if partial and (build is None):
            # No build info, strip here
            return (major, minor, patch, None, None)
        prerelease = ()
    elif prerelease == '':
        prerelease = ()
    else:
        prerelease = tuple(prerelease.split('.'))
        cls._validate_identifiers(prerelease, allow_leading_zeroes=False)

    if build is None:
        build = None if partial else ()
    elif build == '':
        build = ()
    else:
        build = tuple(build.split('.'))
        cls._validate_identifiers(build, allow_leading_zeroes=True)

    return (major, minor, patch, prerelease, build)
777,588
Retrieve comparison methods to apply on version components. This is a private API. Args: partial (bool): whether to provide 'partial' or 'strict' matching. Returns: 5-tuple of cmp-like functions.
def _comparison_functions(cls, partial=False):
    """Retrieve comparison methods to apply on version components.

    This is a private API.

    Args:
        partial (bool): whether to provide 'partial' or 'strict' matching.

    Returns:
        5-tuple of cmp-like functions.
    """
    def prerelease_cmp(a, b):
        if a and b:
            return identifier_list_cmp(a, b)
        if a:
            # Versions with a prerelease field have lower precedence.
            return -1
        if b:
            return 1
        return 0

    def build_cmp(a, b):
        # Builds are only comparable when equal.
        return 0 if a == b else NotImplemented

    def make_optional(orig_cmp_fun):
        # In partial mode a missing (None) component matches anything.
        @functools.wraps(orig_cmp_fun)
        def alt_cmp_fun(a, b):
            if a is None or b is None:
                return 0
            return orig_cmp_fun(a, b)
        return alt_cmp_fun

    if partial:
        return [
            base_cmp,  # Major is still mandatory
            make_optional(base_cmp),
            make_optional(base_cmp),
            make_optional(prerelease_cmp),
            make_optional(build_cmp),
        ]
    return [
        base_cmp,
        base_cmp,
        base_cmp,
        prerelease_cmp,
        build_cmp,
    ]
777,592
Creates a Local File Copy on Uploadcare Storage. Args: - effects: Adds CDN image effects. If ``self.default_effects`` property is set effects will be combined with default effects. - store: If ``store`` option is set to False the copy of your file will be deleted in 24 hour period after the upload. Works only if `autostore` is enabled in the project.
def create_local_copy(self, effects=None, store=None):
    """Create a local copy of this file on Uploadcare storage.

    Args:
        effects: CDN image effects; combined with ``self.default_effects``
            by ``_build_effects`` if those are set.
        store: storage flag passed through to the API when truthy.

    Returns:
        the parsed REST response of the copy request.
    """
    effects = self._build_effects(effects)
    # NOTE(review): a falsy ``store`` (e.g. False) is coerced to '' and the
    # key is then omitted entirely, so "store=False" cannot be expressed to
    # the API here — confirm against the Uploadcare REST docs.
    store = store or ''
    data = {
        'source': self.cdn_path(effects)
    }
    if store:
        data['store'] = store
    return rest_request('POST', 'files/', data=data)
777,629
Uploads a file and returns ``File`` instance. Args: - file_obj: file object to upload to - store (Optional[bool]): Should the file be automatically stored upon upload. Defaults to None. - False - do not store file - True - store file (can result in error if autostore is disabled for project) - None - use project settings Returns: ``File`` instance
def upload(cls, file_obj, store=None):
    """Upload ``file_obj`` and return a ``File`` instance.

    Args:
        file_obj: file object to upload.
        store: None -> project settings ('auto'); True -> store ('1');
            False -> do not store ('0').

    Returns:
        a ``File`` instance wrapping the uploaded file.
    """
    # Map the tri-state flag onto the values the upload API expects.
    if store is None:
        store_flag = 'auto'
    else:
        store_flag = '1' if store else '0'
    response = uploading_request(
        'POST', 'base/',
        data={'UPLOADCARE_STORE': store_flag},
        files={'file': file_obj})
    return cls(response['file'])
777,632
Makes the drone move (translate/rotate). Parameters: lr -- left-right tilt: float [-1..1] negative: left, positive: right fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards vv -- vertical speed: float [-1..1] negative: go down, positive: rise va -- angular speed: float [-1..1] negative: spin left, positive: spin right
def move(self, lr, fb, vv, va):
    """Make the drone move (translate/rotate) via a PCMD command.

    Parameters:
        lr -- left-right tilt [-1..1]; negative: left, positive: right
        fb -- front-back tilt [-1..1]; negative: forwards, positive: backwards
        vv -- vertical speed [-1..1]; negative: go down, positive: rise
        va -- angular speed [-1..1]; negative: spin left, positive: spin right
    """
    # True enables progressive-command mode so the tilt/speed args apply.
    self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
777,708
Basic behaviour of the drone: take-off/landing, emergency stop/reset) Parameters: seq -- sequence number takeoff -- True: Takeoff / False: Land emergency -- True: Turn off the engines
def ref(host, seq, takeoff, emergency=False):
    """Send the basic-behaviour REF command (take-off/land, emergency).

    Parameters:
        host -- drone host to send the command to
        seq -- sequence number
        takeoff -- True: take off / False: land
        emergency -- True: cut the engines
    """
    # Mandatory constant bits of the REF control bitfield.
    BASE_BITS = 0b10001010101000000000000000000
    TAKEOFF_BIT = 0b1000000000   # bit 9
    EMERGENCY_BIT = 0b100000000  # bit 8

    bitfield = BASE_BITS
    if takeoff:
        bitfield |= TAKEOFF_BIT
    if emergency:
        bitfield |= EMERGENCY_BIT
    at(host, 'REF', seq, [bitfield])
777,729
Sends control values directly to the engines, overriding control loops. Parameters: seq -- sequence number m1 -- Integer: front left command m2 -- Integer: front right command m3 -- Integer: back right command m4 -- Integer: back left command
def pwm(host, seq, m1, m2, m3, m4):
    """Send raw motor commands, overriding the control loops.

    Parameters:
        seq -- sequence number
        m1 -- Integer: front left command
        m2 -- Integer: front right command
        m3 -- Integer: back right command
        m4 -- Integer: back left command
    """
    at(host, 'PWM', seq, [m1, m2, m3, m4])
777,732
Control the drones LED. Parameters: seq -- sequence number anim -- Integer: animation to play f -- Float: frequency in HZ of the animation d -- Integer: total duration in seconds of the animation
def led(host, seq, anim, f, d):
    """Control the drone's LED animation.

    Parameters:
        seq -- sequence number
        anim -- Integer: animation to play
        f -- frequency in Hz of the animation (coerced to float for the wire format)
        d -- Integer: total duration in seconds of the animation
    """
    at(host, 'LED', seq, [anim, float(f), d])
777,733
Makes the drone execute a predefined movement (animation). Parameters: seq -- sequence number anim -- Integer: animation to play d -- Integer: total duration in seconds of the animation
def anim(host, seq, anim, d):
    """Make the drone execute a predefined movement (animation).

    Parameters:
        seq -- sequence number
        anim -- Integer: animation to play (note: shadows the function name)
        d -- Integer: total duration in seconds of the animation
    """
    at(host, 'ANIM', seq, [anim, d])
777,734
Encodes data to slip protocol and then sends over serial port Uses the SlipLib module to convert the message data into SLIP format. The message is then sent over the serial port opened with the instance of the Faraday class used when invoking send(). Args: msg (bytes): Bytes format message to send over serial port. Returns: int: Number of bytes transmitted over the serial port.
def send(self, msg):
    """SLIP-encode ``msg`` and write it to the serial port.

    Args:
        msg (bytes): message to transmit.

    Returns:
        int: number of bytes written to the serial port.
    """
    # A fresh driver per call keeps encoding state isolated.
    framed = sliplib.Driver().send(msg)
    return self._serialPort.write(framed)
777,809
Reads in data from a serial port (length bytes), decodes SLIP packets A function which reads from the serial port and then uses the SlipLib module to decode the SLIP protocol packets. Each message received is added to a receive buffer in SlipLib which is then returned. Args: length (int): Length to receive with serialPort.read(length) Returns: bytes: An iterator of the receive buffer
def receive(self, length):
    """Read ``length`` bytes from the serial port and decode SLIP packets.

    Args:
        length (int): number of bytes to request from serialPort.read().

    Returns:
        an iterator over the decoded messages in the driver's buffer.
    """
    decoder = sliplib.Driver()
    raw = self._serialPort.read(length)
    # Driver.receive returns the list of complete messages decoded so far.
    messages = decoder.receive(raw)
    return iter(messages)
777,810
Checks whether specified port is available. Source code derived from @lqdev suggestion per #38 Args: port: Serial port location i.e. 'COM1'. Default is /dev/ttyUSB0 Returns: available: Boolean value indicating presence of port
def isPortAvailable(port='/dev/ttyUSB0'):
    """Return True when the specified serial port is present.

    Args:
        port: serial port location, e.g. 'COM1'. Default is /dev/ttyUSB0.

    Returns:
        bool: True if grep found at least one matching port.
    """
    # The original bound its generator to a local named ``isPortAvailable``,
    # shadowing the function itself; next() with a default also replaces the
    # try/except StopIteration dance.
    matches = serial.tools.list_ports.grep(port)
    return next(matches, None) is not None
777,818
r"""Read in a complete file `file` as a string Parameters: - `file`: a file handle or a string (`str` or `unicode`). - `binary`: whether to read in the file in binary mode (default: False).
def slurp(file, binary=False, expand=False):
    r"""Read in a complete file `file` and return its contents as a string.

    Parameters:
        - `file`: a file handle or a path string.
        - `binary`: whether to read the file in binary mode (default: False).
        - `expand`: forwarded to `_normalizeToFile` — presumably expands
          user/shell path constructs; TODO confirm.
    """
    # "rb" for binary mode, plain "r" otherwise.
    mode = "r" + ["b",""][not binary]
    file = _normalizeToFile(file, mode=mode, expand=expand)
    try:
        return file.read()
    finally:
        file.close()
778,053
r"""Load variables pickled with `saveVars`. Parameters: - `ask`: If `True` then don't overwrite existing variables without asking. - `only`: A list to limit the variables to or `None`. - `into`: The dictionary the variables should be loaded into (defaults to global dictionary).
def loadVars(filename, ask=True, into=None, only=None):
    r"""Load variables pickled with `saveVars`.

    Parameters:
        - `ask`: if True, don't overwrite existing variables without asking.
        - `only`: a list limiting which variables to load, or None for all.
        - `into`: the dictionary to load the variables into (defaults to the
          caller's global dictionary via `magicGlobals`).
    """
    filename = os.path.expanduser(filename)
    if into is None:
        into = magicGlobals()
    varH = loadDict(filename)
    toUnpickle = only or varH.keys()
    # Python 2 idioms throughout: dict.has_key, print statement, raw_input.
    alreadyDefined = filter(into.has_key, toUnpickle)
    if alreadyDefined and ask:
        print "The following vars already exist; overwrite (yes/NO)?\n",\
            "\n".join(alreadyDefined)
        if raw_input() != "yes":
            toUnpickle = without(toUnpickle, alreadyDefined)
    if not toUnpickle:
        print "nothing to unpickle"
        return None
    print "unpickling:\n",\
        "\n".join(sorted(toUnpickle))
    # Drop everything we decided not to unpickle before updating `into`.
    for k in varH.keys():
        if k not in toUnpickle:
            del varH[k]
    into.update(varH)
778,100
Used by cli to add this as an argument to argparse parser. Args: parser: parser to add this argument to
def add_argument_to(self, parser):
    """Add this argument to the given argparse parser (used by the CLI).

    Args:
        parser: argparse parser to add this argument to.

    Raises:
        exceptions.ExecutionException: if argparse rejects the argument.
    """
    from devassistant.cli.devassistant_argparse import DefaultIffUsedActionFactory
    if isinstance(self.kwargs.get('action', ''), list):
        # see documentation of DefaultIffUsedActionFactory to see why this is necessary
        if self.kwargs['action'][0] == 'default_iff_used':
            self.kwargs['action'] = DefaultIffUsedActionFactory.generate_action(
                self.kwargs['action'][1])
    # In cli 'preserved' is not supported.
    # It needs to be removed because it is unknown for argparse.
    self.kwargs.pop('preserved', None)
    try:
        parser.add_argument(*self.flags, **self.kwargs)
    except Exception as ex:
        problem = "Error while adding argument '{name}': {error}".\
            format(name=self.name, error=repr(ex))
        raise exceptions.ExecutionException(problem)
778,683
Returns the value for specified gui hint (or a sensible default value, if this argument doesn't specify the hint). Args: hint: name of the hint to get value for Returns: value of the hint specified in yaml or a sensible default
def get_gui_hint(self, hint):
    """Return the value for the specified gui hint, or a sensible default.

    Args:
        hint: name of the hint to get a value for ('type' or 'default').

    Returns:
        value of the hint specified in yaml or a computed default.
    """
    if hint == 'type':
        # 'self.kwargs.get('nargs') == 0' is there for default_iff_used, which may
        # have nargs: 0, so that it works similarly to 'store_const'
        if self.kwargs.get('action') == 'store_true' or self.kwargs.get('nargs') == 0:
            return 'bool'
        # store_const is represented by checkbox, but computes default differently
        elif self.kwargs.get('action') == 'store_const':
            return 'const'
        return self.gui_hints.get('type', 'str')
    elif hint == 'default':
        hint_type = self.get_gui_hint('type')
        hint_default = self.gui_hints.get('default', None)
        arg_default = self.kwargs.get('default', None)
        preserved_value = None
        if 'preserved' in self.kwargs:
            preserved_value = config_manager.get_config_value(self.kwargs['preserved'])

        if hint_type == 'path':
            # Precedence: preserved config value > yaml hint > argparse default.
            if preserved_value is not None:
                default = preserved_value
            elif hint_default is not None:
                # '$(pwd)' in the hint expands to cwd (or home as fallback).
                default = hint_default.replace('$(pwd)', utils.get_cwd_or_homedir())
            else:
                default = arg_default or '~'
            return os.path.abspath(os.path.expanduser(default))
        elif hint_type == 'bool':
            return hint_default or arg_default or False
        elif hint_type == 'const':
            return hint_default or arg_default
        else:
            # '$(whoami)' expands to the current user name.
            if hint_default == '$(whoami)':
                hint_default = getpass.getuser()
            return preserved_value or hint_default or arg_default or ''
778,684
Recursively searches self._tree - has format of (Assistant: [list_of_subassistants]) - for specific path from first to last selected subassistants. Args: kwargs: arguments containing names of the given assistants in form of subassistant_0 = 'name', subassistant_1 = 'another_name', ... Returns: list of subassistants objects from tree sorted from first to last
def get_selected_subassistant_path(self, **kwargs):
    """Walk self's subassistant tree along the names selected in kwargs.

    Args:
        kwargs: names of the selected assistants in the form
            subassistant_0='name', subassistant_1='another_name', ...

    Returns:
        list of assistant objects from the tree, from first to last selected.

    Raises:
        exceptions.AssistantNotFoundException: when a requested name is not
            found at the current tree level.
    """
    path = [self]
    previous_subas_list = None
    currently_searching = self.get_subassistant_tree()[1]

    # len(path) - 1 always points to next subassistant_N, so we can use it to control iteration
    while settings.SUBASSISTANT_N_STRING.format(len(path) - 1) in kwargs and \
            kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:
        for sa, subas_list in currently_searching:
            if sa.name == kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:
                currently_searching = subas_list
                path.append(sa)
                break  # sorry if you shed a tear ;)

        # If the list didn't change, the requested name wasn't found above.
        if subas_list == previous_subas_list:
            raise exceptions.AssistantNotFoundException(
                'No assistant {n} after path {p}.'.format(
                    n=kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)],
                    p=path))
        previous_subas_list = subas_list

    return path
778,688
Loads yaml files from all given directories. Args: directories: list of directories to search Returns: dict of {fullpath: loaded_yaml_structure}
def load_all_yamls(cls, directories):
    """Load all ``*.yaml`` files found (recursively) under given directories.

    Args:
        directories: list of directories to search.

    Returns:
        dict mapping full file path -> loaded yaml structure.
    """
    yaml_files = []
    loaded_yamls = {}

    for d in directories:
        # Create missing user-writable directories on demand. Test against
        # the user's actual home (as load_yaml_by_relpath does) instead of
        # the hard-coded '/home' prefix, which missed e.g. /root or macOS.
        if d.startswith(os.path.expanduser('~')) and not os.path.exists(d):
            os.makedirs(d)
        for dirname, subdirs, files in os.walk(d):
            yaml_files.extend(os.path.join(dirname, f)
                              for f in files if f.endswith('.yaml'))

    for f in yaml_files:
        loaded_yamls[f] = cls.load_yaml_by_path(f)

    return loaded_yamls
778,692
Load a yaml file with path that is relative to one of given directories. Args: directories: list of directories to search name: relative path of the yaml file to load log_debug: log all messages as debug Returns: tuple (fullpath, loaded yaml structure) or None if not found
def load_yaml_by_relpath(cls, directories, rel_path, log_debug=False):
    """Load a yaml file with a path relative to one of given directories.

    Args:
        directories: list of directories to search.
        rel_path: relative path of the yaml file to load.
        log_debug: log all messages as debug.

    Returns:
        tuple (fullpath, loaded yaml structure) or None if not found.
    """
    for d in directories:
        # Create missing directories under the user's home on demand.
        if d.startswith(os.path.expanduser('~')) and not os.path.exists(d):
            os.makedirs(d)
        possible_path = os.path.join(d, rel_path)
        if os.path.exists(possible_path):
            loaded = cls.load_yaml_by_path(possible_path, log_debug=log_debug)
            if loaded is not None:
                # Return the structure we already loaded; the original
                # re-read and re-parsed the file a second time here
                # (and dropped log_debug on that second load).
                return (possible_path, loaded)
    return None
778,693
Checks if the dap is valid, reports problems Parameters: network -- whether to run checks that requires network connection output -- where to write() problems, might be None raises -- whether to raise an exception immediately after problem is detected
def check(cls, dap, network=False, yamls=True, raises=False, logger=logger):
    """Check the dap for problems and report them.

    Args:
        dap: the dap instance to check.
        network: also run checks that need a network connection.
        yamls: also check the contained yaml files.
        raises: raise an exception immediately when a problem is detected.
        logger: logger to report problems to.

    Returns:
        True if the dap is valid (no problem reported), False otherwise.
    """
    # Transient state consumed by dap._report_problem during the checks.
    dap._check_raises = raises
    dap._problematic = False
    dap._logger = logger

    problems = list()

    problems += cls.check_meta(dap)
    problems += cls.check_no_self_dependency(dap)
    problems += cls.check_topdir(dap)
    problems += cls.check_files(dap)

    if yamls:
        problems += cls.check_yamls(dap)
    if network:
        problems += cls.check_name_not_on_dapi(dap)

    for problem in problems:
        dap._report_problem(problem.message, problem.level)

    # Clean up the transient flag; _problematic/_logger are left in place.
    del dap._check_raises
    return not dap._problematic
778,718
Inits a cache objects with given cache_file. Creates the cache file if it doesn't exist. If cache_file exists, but was created with different DevAssistant version, it gets deleted. Args: cache_file: cache file to use
def __init__(self, cache_file=settings.CACHE_FILE):
    """Init a cache object backed by ``cache_file``.

    Creates the cache file if it doesn't exist; if it exists but was
    written by a different DevAssistant version, it is reset.

    Args:
        cache_file: cache file to use.
    """
    self.cache_file = cache_file
    # snippets are shared across many assistants, so we remember their ctimes
    # here, because doing it again for each assistant would be very costly
    self.snip_ctimes = {}

    reset_cache = False
    if os.path.exists(self.cache_file):
        self.cache = yaml_loader.YamlLoader.load_yaml_by_path(cache_file) or {}
        # A cache written by another DevAssistant version is stale.
        if self.cache.get('version', '0.0.0') != devassistant.__version__:
            reset_cache = True
    else:
        if not os.path.exists(os.path.dirname(cache_file)):
            os.makedirs(os.path.dirname(cache_file))
        reset_cache = True

    # if writing the file raises, YamlAssistantLoader catches the exception
    # and doesn't use cache at all
    if reset_cache:
        # Truncate the file; 'with' closes the handle even if open/write
        # raises, unlike the original bare open()/close() pair.
        with open(cache_file, 'w'):
            pass
        self.cache = {'version': devassistant.__version__}
778,788
Checks and refreshes (if needed) all assistants with given role. Args: role: role of assistants to refresh file_hierarchy: hierarchy as returned by devassistant.yaml_assistant_loader.\ YamlAssistantLoader.get_assistants_file_hierarchy
def refresh_role(self, role, file_hierarchy):
    """Check and refresh (if needed) all assistants with given role.

    Args:
        role: role of assistants to refresh.
        file_hierarchy: hierarchy as returned by
            YamlAssistantLoader.get_assistants_file_hierarchy.
    """
    if role not in self.cache:
        self.cache[role] = {}
    was_change = self._refresh_hierarchy_recursive(self.cache[role], file_hierarchy)
    if was_change:
        # Rewrite the cache file only when something changed; 'with'
        # guarantees the handle is closed even if yaml.dump raises
        # (the original left it open in that case).
        with open(self.cache_file, 'w') as cf:
            yaml.dump(self.cache, cf, Dumper=Dumper)
778,789
Completely refreshes cached assistant from file. Args: cached_ass: an assistant from cache hierarchy (for format see Cache class docstring) file_ass: the respective assistant from filesystem hierarchy (for format see what refresh_role accepts)
def _ass_refresh_attrs(self, cached_ass, file_ass):
    """Completely refresh a cached assistant from its source file.

    Args:
        cached_ass: an assistant from the cache hierarchy (see Cache docstring).
        file_ass: the respective assistant from the filesystem hierarchy
            (for the format see what refresh_role accepts).
    """
    # we need to process assistant in custom way to see unexpanded args, etc.
    loaded_ass = yaml_loader.YamlLoader.load_yaml_by_path(file_ass['source'], log_debug=True)
    attrs = loaded_ass
    yaml_checker.check(file_ass['source'], attrs)
    cached_ass['source'] = file_ass['source']
    cached_ass['ctime'] = os.path.getctime(file_ass['source'])
    cached_ass['attrs'] = {}
    cached_ass['snippets'] = {}
    # only cache these attributes if they're actually found in assistant
    # we do this to specify the default values for them just in one place
    # which is currently YamlAssistant.parsed_yaml property setter
    for a in ['fullname', 'description', 'icon_path']:
        if a in attrs:
            cached_ass['attrs'][a] = attrs.get(a)
    # args have different processing, we can't just take them from assistant
    if 'args' in attrs:
        cached_ass['attrs']['args'] = {}
        for argname, argparams in attrs.get('args', {}).items():
            if 'use' in argparams or 'snippet' in argparams:
                # Argument comes from a snippet: take the snippet's arg,
                # overlay local params, and record the snippet ctime so
                # cache invalidation notices snippet changes.
                snippet_name = argparams.pop('use', None) or argparams.pop('snippet')
                snippet = yaml_snippet_loader.YamlSnippetLoader.get_snippet_by_name(snippet_name)
                cached_ass['attrs']['args'][argname] = snippet.get_arg_by_name(argname)
                cached_ass['attrs']['args'][argname].update(argparams)
                cached_ass['snippets'][snippet.name] = self._get_snippet_ctime(snippet.name)
            else:
                cached_ass['attrs']['args'][argname] = argparams
778,792
Returns a completely new cache hierarchy for given assistant file. Args: file_ass: the assistant from filesystem hierarchy to create cache hierarchy for (for format see what refresh_role accepts) Returns: the newly created cache hierarchy
def _new_ass_hierarchy(self, file_ass): ret_struct = {'source': '', 'subhierarchy': {}, 'attrs': {}, 'snippets': {}} ret_struct['source'] = file_ass['source'] self._ass_refresh_attrs(ret_struct, file_ass) for name, subhierarchy in file_ass['subhierarchy'].items(): ret_struct['subhierarchy'][name] = self._new_ass_hierarchy(subhierarchy) return ret_struct
778,793
Returns and remembers (during this DevAssistant invocation) last ctime of given snippet. Calling ctime costs a lot of time and some snippets, like common_args, are used widely, so we don't want to call ctime bazillion times on them during one invocation. Args: snip_name: name of snippet to get ctime for Returns: ctime of the snippet
def _get_snippet_ctime(self, snip_name): if snip_name not in self.snip_ctimes: snippet = yaml_snippet_loader.YamlSnippetLoader.get_snippet_by_name(snip_name) self.snip_ctimes[snip_name] = os.path.getctime(snippet.path) return self.snip_ctimes[snip_name]
778,794
Returns 2-tuple with names of catch control vars, e.g. for "catch $was_exc, $exc" it returns ('was_exc', 'exc'). Args: catch: the whole catch line Returns: 2-tuple with names of catch control variables Raises: exceptions.YamlSyntaxError if the catch line is malformed
def get_catch_vars(catch):
    """Return the two control-variable names from a "catch $x, $y" line.

    Args:
        catch: the whole catch line.

    Returns:
        2-tuple with the names of the catch control variables.

    Raises:
        exceptions.YamlSyntaxError: if the catch line is malformed.
    """
    match = re.match(r'catch\s+(\${?\S+}?),\s*(\${?\S+}?)', catch)
    if match is None:
        raise exceptions.YamlSyntaxError(
            'Catch must have format "catch $x, $y", got "{0}"'.format(catch))
    first, second = match.groups()
    return get_var_name(first), get_var_name(second)
778,804
If given relative path exists in one of DevAssistant load paths, return its full path. Args: relpath: a relative path, e.g. "assitants/crt/test.yaml" Returns: absolute path of the file, e.g. "/home/x/.devassistant/assistanta/crt/test.yaml or None if file is not found
def find_file_in_load_dirs(relpath):
    """Return the full path of ``relpath`` in the first DevAssistant load
    dir that contains it, or None if no load dir has it.

    Args:
        relpath: a relative path, e.g. "assistants/crt/test.yaml".

    Returns:
        absolute path of the file, or None when not found.
    """
    # Tolerate an accidental leading separator in the relative path.
    if relpath.startswith(os.path.sep):
        relpath = relpath.lstrip(os.path.sep)
    for load_dir in settings.DATA_DIRECTORIES:
        candidate = os.path.join(load_dir, relpath)
        if os.path.exists(candidate):
            return candidate
    return None
778,832
Generates argument parser for given assistant tree and actions. Args: tree: assistant tree as returned by devassistant.assistant_base.AssistantBase.get_subassistant_tree actions: dict mapping actions (devassistant.actions.Action subclasses) to their subaction dicts Returns: instance of devassistant_argparse.ArgumentParser (subclass of argparse.ArgumentParser)
def generate_argument_parser(cls, tree, actions={}):
    """Generate the CLI argument parser for an assistant tree plus actions.

    Args:
        tree: assistant tree as returned by
            devassistant.assistant_base.AssistantBase.get_subassistant_tree.
        actions: dict mapping Action subclasses to their subaction dicts.

    Returns:
        instance of devassistant_argparse.ArgumentParser.
    """
    cur_as, cur_subas = tree
    parser = devassistant_argparse.ArgumentParser(argument_default=argparse.SUPPRESS,
                                                  usage=argparse.SUPPRESS,
                                                  add_help=False)
    cls.add_default_arguments_to(parser)

    # add any arguments of the top assistant
    for arg in cur_as.args:
        arg.add_argument_to(parser)

    if cur_subas or actions:
        # then add the subassistants as arguments
        subparsers = cls._add_subparsers_required(
            parser, dest=settings.SUBASSISTANT_N_STRING.format('0'))
        for subas in sorted(cur_subas, key=lambda x: x[0].name):
            # Register the subassistant once per name/alias.
            for alias in [subas[0].name] + getattr(subas[0], 'aliases', []):
                cls.add_subassistants_to(subparsers, subas, level=1, alias=alias)

        for action, subactions in sorted(actions.items(), key=lambda x: x[0].name):
            cls.add_action_to(subparsers, action, subactions, level=1)

    return parser
778,889
Adds assistant from given part of assistant tree and all its subassistants to a given argument parser. Args: parser: instance of devassistant_argparse.ArgumentParser assistant_tuple: part of assistant tree (see generate_argument_parser doc) level: level of subassistants that given assistant is at
def add_subassistants_to(cls, parser, assistant_tuple, level, alias=None):
    """Add the assistant from this part of the tree (and, recursively, all
    its subassistants) to the given argument parser.

    Args:
        parser: instance of devassistant_argparse.ArgumentParser.
        assistant_tuple: part of the assistant tree (see
            generate_argument_parser doc).
        level: level of subassistants that given assistant is at.
        alias: alternative name to register the assistant under.
    """
    name = alias or assistant_tuple[0].name
    p = parser.add_parser(name,
                          description=assistant_tuple[0].description,
                          argument_default=argparse.SUPPRESS)
    for arg in assistant_tuple[0].args:
        arg.add_argument_to(p)

    if len(assistant_tuple[1]) > 0:
        subparsers = cls._add_subparsers_required(
            p,
            dest=settings.SUBASSISTANT_N_STRING.format(level),
            title=cls.subparsers_str,
            description=cls.subparsers_desc)
        for subas_tuple in sorted(assistant_tuple[1], key=lambda x: x[0].name):
            cls.add_subassistants_to(subparsers, subas_tuple, level + 1)
    elif level == 1:
        # NOTE(review): the returned object is unused; this call is
        # presumably only for the "no assistants" message it registers —
        # confirm against devassistant_argparse.
        subparsers = cls._add_subparsers_required(
            p,
            dest=settings.SUBASSISTANT_N_STRING.format(level),
            title=cls.subparsers_str,
            description=devassistant_argparse.ArgumentParser.no_assistants_msg)
778,891
Adds given action to given parser Args: parser: instance of devassistant_argparse.ArgumentParser action: devassistant.actions.Action subclass subactions: dict with subactions - {SubA: {SubB: {}}, SubC: {}}
def add_action_to(cls, parser, action, subactions, level):
    """Add given action (and, recursively, its subactions) to given parser.

    Args:
        parser: instance of devassistant_argparse.ArgumentParser.
        action: devassistant.actions.Action subclass.
        subactions: dict with subactions - {SubA: {SubB: {}}, SubC: {}}.
        level: nesting level of the action.
    """
    p = parser.add_parser(action.name,
                          description=action.description,
                          argument_default=argparse.SUPPRESS)
    for arg in action.args:
        arg.add_argument_to(p)

    if subactions:
        subparsers = cls._add_subparsers_required(
            p,
            dest=settings.SUBASSISTANT_N_STRING.format(level),
            title=cls.subactions_str,
            description=cls.subactions_desc)
        for subact, subsubacts in sorted(subactions.items(), key=lambda x: x[0].name):
            cls.add_action_to(subparsers, subact, subsubacts, level + 1)
778,892
Returns list of assistants that are subassistants of given superassistants (I love this docstring). Args: roles: list of names of roles, defaults to all roles Returns: list of YamlAssistant instances with specified roles
def get_assistants(cls, superassistants):
    """Return all assistants that are subassistants of given superassistants.

    Args:
        superassistants: superassistant instances to collect for.

    Returns:
        flat list of assistant instances, in superassistant order.
    """
    loaded = cls.load_all_assistants(superassistants)
    # Flatten the per-superassistant lists in order.
    return [assistant
            for supa in superassistants
            for assistant in loaded[supa.name]]
778,925
Fills self._assistants with loaded YamlAssistant instances of requested roles. Tries to use cache (updated/created if needed). If cache is unusable, it falls back to loading all assistants. Args: roles: list of required assistant roles
def load_all_assistants(cls, superassistants):
    """Load YamlAssistant instances for all given superassistants.

    Tries to use the cache (updated/created as needed); if the cache is
    unusable, falls back to loading all assistants from files.

    Args:
        superassistants: top-level assistant instances to load for.

    Returns:
        dict mapping superassistant name -> list of loaded assistants.
    """
    # mapping of assistant roles to lists of top-level assistant instances
    _assistants = {}
    # {'crt': CreatorAssistant, ...}
    superas_dict = dict(map(lambda a: (a.name, a), superassistants))
    to_load = set(superas_dict.keys())
    for tl in to_load:
        dirs = [os.path.join(d, tl) for d in cls.assistants_dirs]
        file_hierarchy = cls.get_assistants_file_hierarchy(dirs)
        # load all if we're not using cache or if we fail to load it
        load_all = not settings.USE_CACHE
        if settings.USE_CACHE:
            try:
                cch = cache.Cache()
                cch.refresh_role(tl, file_hierarchy)
                _assistants[tl] = cls.get_assistants_from_cache_hierarchy(cch.cache[tl],
                                                                          superas_dict[tl],
                                                                          role=tl)
            except BaseException as e:
                # Any cache failure (corrupt file, write error, ...) is
                # non-fatal: log and fall back to a full file load.
                logger.debug('Failed to use DevAssistant cachefile {0}: {1}'.format(
                    settings.CACHE_FILE, e))
                load_all = True
        if load_all:
            _assistants[tl] = cls.get_assistants_from_file_hierarchy(file_hierarchy,
                                                                     superas_dict[tl],
                                                                     role=tl)
    return _assistants
778,926
Constructs instance of YamlAssistant loaded from given structure y, loaded from source file source. Args: source: path to assistant source file y: loaded yaml structure superassistant: superassistant of this assistant Returns: YamlAssistant instance constructed from y with source file source Raises: YamlError: if the assistant is malformed
def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True,
                        role=settings.DEFAULT_ASSISTANT_ROLE):
    """Construct a YamlAssistant from structure ``y`` loaded from ``source``.

    Args:
        source: path to the assistant source file.
        y: loaded yaml structure.
        superassistant: superassistant of this assistant.
        fully_loaded: whether the yaml structure is complete.
        role: assistant role.

    Returns:
        YamlAssistant instance constructed from ``y``.

    Raises:
        YamlError: if the assistant is malformed (via yaml_checker.check).
    """
    # In pre-0.9.0, we required assistant to be a mapping of {name: assistant_attributes}
    # now we allow that, but we also allow omitting the assistant name and putting
    # the attributes to top_level, too.
    # The assistant name is derived from the file name, not the yaml content.
    name = os.path.splitext(os.path.basename(source))[0]
    yaml_checker.check(source, y)
    assistant = yaml_assistant.YamlAssistant(name, y, source, superassistant,
                                             fully_loaded=fully_loaded, role=role)
    return assistant
778,930
Called after instantiating with a compressed payload Params: counts_len counts size to use based on decoded settings in the header
def init_counts(self, counts_len):
    """Decode the compressed payload into a freshly allocated counts array.

    Called after instantiating with a compressed payload.

    Args:
        counts_len: counts size to use, based on decoded settings in the header.

    Returns:
        the result of decode() — presumably decoding status/stats; confirm
        against the codec implementation.
    """
    # Must only be called once, with pending data and no counts allocated yet.
    assert self._data and counts_len and self.counts_len == 0
    self.counts_len = counts_len
    self._init_counts()
    # Decode the varint stream that starts right after the payload header
    # directly into the ctypes counts buffer.
    results = decode(self._data, payload_header_size,
                     addressof(self.counts), counts_len,
                     self.word_size)
    # no longer needed
    self._data = None
    return results
779,340
Compress this payload instance Args: counts_limit how many counters should be encoded starting from index 0 (can be 0), Return: the compressed payload (python string)
def compress(self, counts_limit):
    """Compress this payload instance.

    Args:
        counts_limit: how many counters should be encoded starting from
            index 0 (can be 0).

    Returns:
        the compressed payload (zlib-compressed byte string).

    Raises:
        RuntimeError: when there is no payload to compress.
    """
    if self.payload:
        # worst case varint encoded length is when each counter is at the maximum value
        # in this case 1 more byte per counter is needed due to the more bits
        varint_len = counts_limit * (self.word_size + 1)
        # allocate enough space to fit the header and the varint string
        encode_buf = (c_byte * (payload_header_size + varint_len))()
        # encode past the payload header
        varint_len = encode(addressof(self.counts), counts_limit,
                            self.word_size,
                            addressof(encode_buf) + payload_header_size,
                            varint_len)
        # copy the header after updating the varint stream length
        self.payload.payload_len = varint_len
        ctypes.memmove(addressof(encode_buf), addressof(self.payload),
                       payload_header_size)
        cdata = zlib.compress(ctypes.string_at(encode_buf,
                                               payload_header_size + varint_len))
        return cdata
    # can't compress if no payload
    raise RuntimeError('No payload to compress')
779,342
Record a new value into the histogram Args: value: the value to record (must be in the valid range) count: incremental count (defaults to 1)
def record_value(self, value, count=1):
    """Record ``count`` occurrences of ``value`` into the histogram.

    Args:
        value: the value to record (must be in the valid range).
        count: incremental count (defaults to 1).

    Returns:
        True when recorded; False when the value is negative or maps
        outside the counts array.
    """
    if value < 0:
        return False
    bucket = self._counts_index_for(value)
    if bucket < 0 or bucket >= self.counts_len:
        return False
    self.counts[bucket] += count
    self.total_count += count
    # Track observed extremes.
    if value < self.min_value:
        self.min_value = value
    if value > self.max_value:
        self.max_value = value
    return True
779,360
Record a new value into the histogram and correct for coordinated omission if needed Args: value: the value to record (must be in the valid range) expected_interval: the expected interval between 2 value samples count: incremental count (defaults to 1)
def record_corrected_value(self, value, expected_interval, count=1):
    """Record a value, back-filling for coordinated omission.

    After recording ``value``, synthesizes additional samples at
    value - k * expected_interval (while they stay above the interval)
    to compensate for samples the stalled measurement would have missed.

    Args:
        value: the value to record (must be in the valid range).
        expected_interval: the expected interval between 2 value samples;
            <= 0 disables the correction.
        count: incremental count (defaults to 1).

    Returns:
        True on success, False as soon as any record attempt fails.
    """
    while self.record_value(value, count):
        if expected_interval <= 0 or value <= expected_interval:
            return True
        value -= expected_interval
    return False
779,361
Get the value for a given percentile Args: percentile: a float in [0.0..100.0] Returns: the value for the given percentile
def get_value_at_percentile(self, percentile):
    """Get the value for a given percentile.

    Args:
        percentile: a float in [0.0..100.0].

    Returns:
        the value at that percentile, or 0 if the target count is never
        reached.
    """
    target = self.get_target_count_at_percentile(percentile)
    running = 0
    for bucket in range(self.counts_len):
        running += self.get_count_at_index(bucket)
        if running < target:
            continue
        value = self.get_value_from_index(bucket)
        # Percentile 0.0 reports the lowest equivalent value; any other
        # percentile reports the highest equivalent value of the bucket.
        if percentile:
            return self.get_highest_equivalent_value(value)
        return self.get_lowest_equivalent_value(value)
    return 0
779,367
A faster alternative to query values for a list of percentiles. Args: percentile_list: a list of percentiles in any order, dups will be ignored each element in the list must be a float value in [0.0 .. 100.0] Returns: a dict of percentile values indexed by the percentile
def get_percentile_to_value_dict(self, percentile_list):
    """A faster alternative to query values for a list of percentiles.

    Args:
        percentile_list: a list of percentiles in any order; dups are
            ignored. Each element must be a float in [0.0 .. 100.0].

    Returns:
        a dict of percentile values indexed by the percentile.
    """
    result = {}
    total = 0
    percentile_list_index = 0
    count_at_percentile = 0
    # remove dups and sort
    percentile_list = list(set(percentile_list))
    percentile_list.sort()

    # Single pass over the buckets; consume targets as running total
    # reaches them (the requested percentiles are processed in order).
    for index in range(self.counts_len):
        total += self.get_count_at_index(index)
        while True:
            # recalculate target based on next requested percentile
            if not count_at_percentile:
                if percentile_list_index == len(percentile_list):
                    return result
                percentile = percentile_list[percentile_list_index]
                percentile_list_index += 1
                if percentile > 100:
                    return result
                count_at_percentile = self.get_target_count_at_percentile(percentile)

            if total >= count_at_percentile:
                value_at_index = self.get_value_from_index(index)
                # Percentile 0.0 gets the lowest equivalent value; others
                # get the highest equivalent value of the bucket.
                if percentile:
                    result[percentile] = self.get_highest_equivalent_value(value_at_index)
                else:
                    result[percentile] = self.get_lowest_equivalent_value(value_at_index)
                # Force recomputation of the next target on the next spin.
                count_at_percentile = 0
            else:
                break
    return result
779,368
Called during decoding and add to adjust the new min/max value and total count Args: min_non_zero_index min nonzero index of all added counts (-1 if none) max_index max index of all added counts (-1 if none)
def adjust_internal_tacking_values(self, min_non_zero_index, max_index, total_added):
    """Widen min/max tracking and bump total count after decode/add.

    Args:
        min_non_zero_index: min nonzero index of all added counts (-1 if none).
        max_index: max index of all added counts (-1 if none).
        total_added: number of counts added.
    """
    if max_index >= 0:
        candidate_max = self.get_highest_equivalent_value(
            self.get_value_from_index(max_index))
        if candidate_max > self.max_value:
            self.max_value = candidate_max
    if min_non_zero_index >= 0:
        candidate_min = self.get_value_from_index(min_non_zero_index)
        if candidate_min < self.min_value:
            self.min_value = candidate_min
    self.total_count += total_added
779,376
Called during decoding and add to set (overwrite) the min/max value and total count Args: min_non_zero_index min nonzero index of all added counts (-1 if none) max_index max index of all added counts (-1 if none)
def set_internal_tacking_values(self, min_non_zero_index, max_index, total_added):
    """Overwrite min/max tracking and total count after decode/add.

    Unlike adjust_internal_tacking_values, this replaces the tracked
    values rather than widening them.

    Args:
        min_non_zero_index: min nonzero index of all added counts (-1 if none).
        max_index: max index of all added counts (-1 if none).
        total_added: new total count.
    """
    if max_index >= 0:
        value_at_max = self.get_value_from_index(max_index)
        self.max_value = self.get_highest_equivalent_value(value_at_max)
    if min_non_zero_index >= 0:
        self.min_value = self.get_value_from_index(min_non_zero_index)
    self.total_count = total_added
779,377
Log a start time in the log. Params: start_time_msec time (in milliseconds) since the absolute start time (the epoch)
def output_start_time(self, start_time_msec):
    """Log a start time entry in the log.

    Params:
        start_time_msec: time (in milliseconds) since the epoch.
    """
    start_time_sec = float(start_time_msec) / 1000.0
    # The original passed milliseconds straight to datetime.fromtimestamp
    # (which expects seconds) and called .iso_format — a method that does
    # not exist (the datetime API spells it isoformat) — so this line
    # always raised AttributeError.
    self.log.write("#[StartTime: %f (seconds since epoch), %s]\n" %
                   (start_time_sec,
                    datetime.fromtimestamp(start_time_sec).isoformat(' ')))
779,424
Constructs a new HistogramLogReader that produces intervals read from the specified file name. Params: input_file_name The name of the file to read from reference_histogram a histogram instance used as a reference to create new instances for all subsequent decoded interval histograms
def __init__(self, input_file_name, reference_histogram):
    """Construct a HistogramLogReader over the named log file.

    Params:
        input_file_name: the name of the file to read from.
        reference_histogram: a histogram instance used as a reference to
            create new instances for all subsequent decoded interval
            histograms.
    """
    # Log-level start time, set once the start-time header is observed.
    self.start_time_sec = 0.0
    self.observed_start_time = False
    # Base time for relative interval timestamps — presumably from a
    # BaseTime header; confirm against the parsing code.
    self.base_time_sec = 0.0
    self.observed_base_time = False
    # The file handle stays open for the reader's lifetime.
    self.input_file = open(input_file_name, "r")
    self.reference_histogram = reference_histogram
779,425
Perform additional validation not possible merely with JSON schemas. Args: instance: The STIX object to be validated. checks: A sequence of callables which do the checks. Each callable may be written to accept 1 arg, which is the object to check, or 2 args, which are the object and a ValidationOptions instance. options: ValidationOptions instance with settings affecting how validation should be done.
def _iter_errors_custom(instance, checks, options):
    """Yield errors from custom checks not expressible as JSON schemas.

    Args:
        instance: The STIX object to be validated.
        checks: A sequence of callables which do the checks. Each callable
            may accept 1 arg (the object) or 2 args (object and options).
        options: ValidationOptions instance with settings affecting how
            validation should be done.
    """
    # Perform validation
    for v_function in checks:
        try:
            result = v_function(instance)
        except TypeError:
            # 1-arg call failed; retry as a 2-arg check with options.
            # NOTE(review): this also swallows TypeErrors raised *inside*
            # a 1-arg check and re-invokes it — confirm this is intended.
            result = v_function(instance, options)
        if isinstance(result, Iterable):
            for x in result:
                yield x
        elif result is not None:
            yield result

    # Validate any child STIX objects
    for field in instance:
        if type(instance[field]) is list:
            for obj in instance[field]:
                if _is_stix_obj(obj):
                    for err in _iter_errors_custom(obj, checks, options):
                        yield err
780,721
Return a list of file paths for JSON files within `directory`. Args: directory: A path to a directory. recursive: If ``True``, this function will descend into all subdirectories. Returns: A list of JSON file paths directly under `directory`.
def list_json_files(directory, recursive=False):
    """Return file paths of the JSON files within ``directory``.

    Args:
        directory: a path to a directory.
        recursive: if True, descend into all subdirectories.

    Returns:
        a list of JSON file paths (sorted per directory level).
    """
    results = []
    for top, dirs, files in os.walk(directory):
        # In-place sort keeps os.walk's traversal order deterministic.
        dirs.sort()
        for fname in sorted(files):
            path = os.path.join(top, fname)
            if is_json(path):
                results.append(path)
        if not recursive:
            # Only the top-level directory was requested.
            break
    return results
780,722
Return a list of files to validate from `files`. If a member of `files` is a directory, its children with a ``.json`` extension will be added to the return value. Args: files: A list of file paths and/or directory paths. recursive: If ``true``, this will descend into any subdirectories of input directories. Returns: A list of file paths to validate.
def get_json_files(files, recursive=False):
    """Resolve a mix of file and directory paths into JSON files to validate.

    Directory entries contribute their ``.json`` children; non-JSON file
    entries are skipped.

    Args:
        files: a list of file paths and/or directory paths.
        recursive: if true, descend into subdirectories of input directories.

    Returns:
        a list of JSON file paths.

    Raises:
        NoJSONFileFoundError: when input was given but no JSON file matched.
    """
    if not files:
        return []
    collected = []
    for entry in files:
        if os.path.isdir(entry):
            collected.extend(list_json_files(entry, recursive))
        elif is_json(entry):
            collected.append(entry)
    if not collected:
        raise NoJSONFileFoundError("No JSON files found!")
    return collected
780,723
Validate files based on command line options. Args: options: An instance of ``ValidationOptions`` containing options for this validation run.
def run_validation(options):
    """Validate files based on command line options.

    Args:
        options: An instance of ``ValidationOptions`` containing options for
            this validation run.

    Returns:
        list of FileValidationResults, one per validated input.
    """
    # Special case: input piped on stdin is validated as a single pseudo-file.
    if options.files == sys.stdin:
        results = validate(options.files, options)
        return [FileValidationResults(is_valid=results.is_valid,
                                      filepath='stdin',
                                      object_results=results)]

    files = get_json_files(options.files, options.recursive)
    results = [validate_file(fn, options) for fn in files]
    return results
780,724
Validate the input document `fn` according to the options passed in. If any exceptions are raised during validation, no further validation will take place. Args: fn: The filename of the JSON file to be validated. options: An instance of ``ValidationOptions``. Returns: An instance of FileValidationResults.
def validate_file(fn, options=None):
    """Validate the input document `fn` according to the options passed in.

    If any exceptions are raised during validation, no further validation
    will take place.

    Args:
        fn: The filename of the JSON file to be validated.
        options: An instance of ``ValidationOptions``. If omitted, a default
            set of options is built for this single file.

    Returns:
        An instance of FileValidationResults.
    """
    file_results = FileValidationResults(filepath=fn)
    output.info("Performing JSON schema validation on %s" % fn)

    if not options:
        options = ValidationOptions(files=fn)

    try:
        with open(fn) as instance_file:
            file_results.object_results = validate(instance_file, options)
    except Exception as ex:
        if 'Expecting value' in str(ex):
            # JSON decode errors read like "Expecting value: line N column
            # M (char K)", so the 4th whitespace-separated token is the
            # line number.
            line_no = str(ex).split()[3]
            file_results.fatal = ValidationErrorResults(
                'Invalid JSON input on line %s' % line_no
            )
        else:
            file_results.fatal = ValidationErrorResults(ex)

        msg = ("Unexpected error occurred with file '{fn}'. No further "
               "validation will be performed: {error}")
        output.info(msg.format(fn=fn, error=str(ex)))

    # NOTE(review): this iterates object_results even on the error path —
    # assumes FileValidationResults defaults it to an empty iterable; confirm.
    file_results.is_valid = (all(object_result.is_valid
                                 for object_result in file_results.object_results)
                             and not file_results.fatal)

    return file_results
780,727
Validate the input `string` according to the options passed in. If any exceptions are raised during validation, no further validation will take place. Args: string: The string containing the JSON to be validated. options: An instance of ``ValidationOptions``. Returns: An ObjectValidationResults instance, or a list of such.
def validate_string(string, options=None):
    """Validate the input `string` according to the options passed in.

    If any exceptions are raised during validation, no further validation
    will take place.

    Args:
        string: The string containing the JSON to be validated.
        options: An instance of ``ValidationOptions``.

    Returns:
        An ObjectValidationResults instance, or a list of such.
    """
    output.info("Performing JSON schema validation on input string: " + string)
    # Wrap the string in a file-like object so validate() can treat it
    # exactly like a file input.
    return validate(io.StringIO(string), options)
780,728
Create a JSON schema validator for the given schema. Args: schema_path: The filename of the JSON schema. schema: A Python object representation of the same schema. Returns: An instance of Draft4Validator.
def load_validator(schema_path, schema):
    """Create a JSON schema validator for the given schema.

    Args:
        schema_path: The filename of the JSON schema.
        schema: A Python object representation of the same schema.

    Returns:
        An instance of Draft4Validator.
    """
    # Windows absolute paths need the 'file:///' URI form; POSIX uses 'file:'.
    file_prefix = 'file:///' if os.name == 'nt' else 'file:'
    # Backslashes are normalized so the base URI is valid on Windows.
    resolver = RefResolver(file_prefix + schema_path.replace("\\", "/"), schema)
    return Draft4Validator(schema, resolver=resolver)
780,729
Load the JSON schema at the given path as a Python object. Args: schema_path: A filename for a JSON schema. Returns: A Python object representation of the schema.
def load_schema(schema_path):
    """Load the JSON schema at the given path as a Python object.

    Args:
        schema_path: A filename for a JSON schema.

    Returns:
        A Python object representation of the schema.

    Raises:
        SchemaInvalidError: If the file does not contain valid JSON.
    """
    try:
        with open(schema_path) as schema_file:
            schema = json.load(schema_file)
    except ValueError as e:
        # Use schema_path (same value as schema_file.name) rather than
        # reaching for schema_file inside the handler, and chain the
        # original decode error so its details stay in the traceback.
        raise SchemaInvalidError('Invalid JSON in schema or included schema: '
                                 '%s\n%s' % (schema_path, str(e))) from e
    return schema
780,731