code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def _configure_env_source_rc(config): config.set('global', 'env_source_rc', False) if system.is_osx(): logger.info("On OSX, login shells are default, which only source sprinter's 'env' configuration.") logger.info("I.E. environment variables would be sourced, but not shell functions " ...
Configures whether to have .env source .rc
def get_members(self): res = self.__con__.search_s( self.__ldap_base_dn__, ldap.SCOPE_SUBTREE, "(memberof=%s)" % self.__dn__, ['uid']) ret = [] for val in res: val = val[1]['uid'][0] try: ...
Return all members in the group as CSHMember objects
def check_member(self, member, dn=False): if dn: res = self.__con__.search_s( self.__dn__, ldap.SCOPE_BASE, "(member=%s)" % dn, ['ipaUniqueID']) else: res = self.__con__.search_s( ...
Check if a Member is in the bound group. Arguments: member -- the CSHMember object (or distinguished name) of the member to check against Keyword arguments: dn -- whether or not member is a distinguished name
def add_member(self, member, dn=False): if dn: if self.check_member(member, dn=True): return mod = (ldap.MOD_ADD, 'member', member.encode('ascii')) else: if self.check_member(member): return mod = (ldap.MOD_ADD, 'm...
Add a member to the bound group Arguments: member -- the CSHMember object (or distinguished name) of the member Keyword arguments: dn -- whether or not member is a distinguished name
def read_object_from_yaml(desired_type: Type[Any], file_object: TextIOBase, logger: Logger,
                          fix_imports: bool = True, errors: str = 'strict', *args,
                          **kwargs) -> Any:
    """Parse a single object from an already-open YAML stream.

    Only ``file_object`` is actually used; ``desired_type``, ``logger``,
    ``fix_imports``, ``errors`` and the extra arguments are accepted for
    interface compatibility with sibling parser functions and ignored here.

    :param desired_type: target type (ignored by this implementation)
    :param file_object: open text stream containing YAML
    :return: whatever PyYAML deserializes from the stream
    """
    # NOTE(review): yaml.load() without an explicit Loader can construct
    # arbitrary Python objects and is unsafe on untrusted input -- consider
    # yaml.safe_load. Left unchanged here to preserve behavior.
    return yaml.load(file_object)
Parses a yaml file. :param desired_type: :param file_object: :param logger: :param fix_imports: :param errors: :param args: :param kwargs: :return:
def read_collection_from_yaml(desired_type: Type[Any], file_object: TextIOBase, logger: Logger, conversion_finder: ConversionFinder, fix_imports: bool = True, errors: str = 'strict', **kwargs) -> Any: res = yaml.load(file_object) # convert if req...
Parses a collection from a yaml file. :param desired_type: :param file_object: :param logger: :param fix_imports: :param errors: :param args: :param kwargs: :return:
def get_default_yaml_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]: return [# yaml for any object SingleFileParserFunction(parser_function=read_object_from_yaml, streaming_mode=True, ...
Utility method to return the default parsers able to parse an object from a file. Note that MultifileObjectParser is not provided in this list, as it is already added in a hardcoded way in RootParser :return:
def pass_feature(*feature_names):
    """Decorator factory that injects a feature proxy into the wrapped
    function's keyword arguments, one entry per requested feature name.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            injected = {name: feature_proxy(name) for name in feature_names}
            kwargs.update(injected)
            return f(*args, **kwargs)
        return wrapper
    return decorator
Injects a feature instance into the kwargs
def extract_tar(url, target_dir, additional_compression="", remove_common_prefix=False, overwrite=False): try: if not os.path.exists(target_dir): os.makedirs(target_dir) tf = tarfile.TarFile.open(fileobj=download_to_bytesio(url)) if not os.path.exists(target_dir): ...
extract a targz and install to the target directory
def remove_path(target_path):
    """Delete *target_path*, whether it is a file, a symlink, or a directory tree.

    :param target_path: filesystem path to remove
    :raises OSError: if the path does not exist or cannot be removed
    """
    # Check symlinks first: os.path.isdir() follows links, and
    # shutil.rmtree() refuses to operate on a symlinked directory,
    # so a symlink-to-dir would crash in the original code.
    if os.path.islink(target_path):
        os.unlink(target_path)
    elif os.path.isdir(target_path):
        shutil.rmtree(target_path)
    else:
        os.unlink(target_path)
Delete the target path
def ids(cls, values, itype=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/ids-filter.html Filters documents that only have the provided ids. Note, this filter does not require the _id field to be indexed since it works using the _uid field. ''' instance = cls(ids={'va...
http://www.elasticsearch.org/guide/reference/query-dsl/ids-filter.html Filters documents that only have the provided ids. Note, this filter does not require the _id field to be indexed since it works using the _uid field.
def geo_distance(cls, field, center, distance, distance_type=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-filter.html Filters documents that include only hits that exists within a specific distance from a geo point. field - Field name center - Ce...
http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-filter.html Filters documents that include only hits that exists within a specific distance from a geo point. field - Field name center - Center point (Geo point) distance - String for the distance distance_type ...
def geo_distance_range(cls, field, center, from_distance, to_distance, distance_type=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-range-filter.html Filters documents that exists within a range from a specific point ''' instance = cls(geo_distan...
http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-range-filter.html Filters documents that exists within a range from a specific point
def numeric_range(cls, field, from_value, to_value, include_lower=None, include_upper=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/numeric-range-filter.html Filters documents with fields that have values within a certain numeric range. Similar to range filter, except that it...
http://www.elasticsearch.org/guide/reference/query-dsl/numeric-range-filter.html Filters documents with fields that have values within a certain numeric range. Similar to range filter, except that it works only with numeric values, and the filter execution works differently.
def range(cls, field, from_value=None, to_value=None, include_lower=None, include_upper=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/range-filter.html Filters documents with fields that have terms within a certain range. Similar to range query, except that it acts as a filt...
http://www.elasticsearch.org/guide/reference/query-dsl/range-filter.html Filters documents with fields that have terms within a certain range. Similar to range query, except that it acts as a filter. Can be placed within queries that accept a filter.
def save(self, obj, id_code):
    """Pickle *obj* to ``<data_path>/<id_code>``.

    obj -- any picklable object
    id_code -- unique identifier used as the filename
    """
    # pickle writes bytes, so the file must be opened in binary mode
    # (the original used text mode 'w+', which fails on Python 3);
    # the context manager guarantees the handle is closed.
    with open('{0}/{1}'.format(self.data_path, id_code), 'wb') as filestream:
        pickle.dump(obj, filestream)
Save an object, and use id_code in the filename obj - any object id_code - unique identifier
def load(self, id_code):
    """Unpickle and return the workflow stored under *id_code*.

    id_code -- unique identifier previously passed to save() with the same value
    """
    # context manager closes the handle (the original leaked it)
    with open('{0}/{1}'.format(self.data_path, id_code), 'rb') as filestream:
        return pickle.load(filestream)
Loads a workflow identified by id_code id_code - unique identifier, previously must have called save with same id_code
def init(self): if os.path.isdir(self.path): raise InvalidTodoFile if os.path.exists(self.path): with open(self.path, 'r') as f: tls = [tl.strip() for tl in f if tl] todos = map(_todo_from_file, tls) self.todos = todos ...
Initialize the `todo` file. If the file exists, initialize self.todos and record the current max index of todos: when adding a new todo, its `idx` is simply `self.current_max_idx + 1`.
def _show(self, status=None, idx=None): _show('', 50) if not self.todos: self._show_no_todos() elif idx is not None: for todo in self.todos: if todo['idx'] == idx: self._show_todos(todo) elif status is not None: ...
show todos after format :param status: what status's todos wants to show. default is None, means show all
def write(self, delete_if_empty=False):
    """Flush the current todos to the file at ``self.path``.

    :param delete_if_empty: accepted for interface compatibility
    """
    with open(self.path, 'w') as outfile:
        if self.todos:
            outfile.writelines(_todo_to_file(self.todos))
        else:
            outfile.flush()
flush todos to file :param delete_if_empty: delete if todo is empty
def read_object_from_pickle(desired_type: Type[T], file_path: str, encoding: str, fix_imports: bool = True, errors: str = 'strict', *args, **kwargs) -> Any: import pickle file_object = open(file_path, mode='rb') try: return pickle.load(file_object, fix_imports=fix_im...
Parses a pickle file. :param desired_type: :param file_path: :param encoding: :param fix_imports: :param errors: :param args: :param kwargs: :return:
def should_display_warnings_for(to_type): if not hasattr(to_type, '__module__'): return True elif to_type.__module__ in {'builtins'} or to_type.__module__.startswith('parsyfiles') \ or to_type.__name__ in {'DataFrame'}: return False elif issubclass(to_type, int) or issubclas...
Central method where we control whether warnings should be displayed
def _is_valid_for_dict_to_object_conversion(strict_mode: bool, from_type: Type, to_type: Type) -> bool: # cache previous results try: res, subclasses_hash = _cache_valid_for_dict_to_object[to_type][strict_mode] # Check if are any new subclasses are available if not strict_mode and t...
Returns true if the provided types are valid for dict_to_object conversion Explicitly declare that we are not able to parse collections nor able to create an object from a dictionary if the object's constructor is non correctly PEP484-specified. None should be treated as a Joker here (but we know that nev...
def dict_to_object(desired_type: Type[T], contents_dict: Dict[str, Any], logger: Logger, options: Dict[str, Dict[str, Any]], conversion_finder: ConversionFinder = None, is_dict_of_dicts: bool = False) -> T: check_var(desired_type, var_types=type, var_name='obj_type') c...
Utility method to create an object from a dictionary of constructor arguments. Constructor arguments that dont have the correct type are intelligently converted if possible :param desired_type: :param contents_dict: :param logger: :param options: :param conversion_finder: :param is_dict_of_...
def print_dict(dict_name, dict_value, logger: Logger = None): if logger is None: print(dict_name + ' = ') try: from pprint import pprint pprint(dict_value) except: print(dict_value) else: logger.info(dict_name + ' = ') try: ...
Utility method to print a named dictionary :param dict_name: :param dict_value: :return:
def get_default_object_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]: return [SingleFileParserFunction(parser_function=read_object_from_pickle, streaming_mode=False, supported_exts={'.pyc'},...
Utility method to return the default parsers able to parse an object from a file. Note that MultifileObjectParser is not provided in this list, as it is already added in a hardcoded way in RootParser :return:
def get_default_object_converters(conversion_finder: ConversionFinder) \ -> List[Union[Converter[Any, Type[None]], Converter[Type[None], Any]]]: return [ ConverterFunction(from_type=b64str, to_type=AnyObject, conversion_method=base64_ascii_str_pickle_to_object), ConverterFuncti...
Utility method to return the default converters associated to dict (from dict to other type, and from other type to dict) :return:
def create(obj: PersistedObject, obj_type: Type[Any], arg_name: str): return MissingMandatoryAttributeFiles('Multifile object ' + str(obj) + ' cannot be built from constructor of ' 'type ' + get_pretty_type_str(obj_type) + ...
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param obj_type: :param arg_name: :return:
def create(item_type: Type[Any], constructor_atts: List[str], invalid_property_name: str): return InvalidAttributeNameForConstructorError('Cannot parse object of type <' + get_pretty_type_str(item_type) + '> using the provided configuration file: c...
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param item_type: :return:
def create(item_type: Type[Any], constructor_args: Dict[str, Any], cause: Exception): return ObjectInstantiationException('Error while building object of type <' + get_pretty_type_str(item_type) + '> using its constructor and parsed contents : ' + str(constru...
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param item_type: :return:
def create(desired_type: Type[Any], contents_dict: Dict, caught: Exception): msg = 'Error while trying to instantiate object of type ' + str(desired_type) + ' using dictionary input_dict:'\ + 'Caught error message is : ' + caught.__class__.__name__ + ' : ' + str(caught) + '\n' try...
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param desired_type: :param contents_dict: :param caught: :return:
def is_able_to_parse_detailed(self, desired_type: Type[Any], desired_ext: str, strict: bool): if not _is_valid_for_dict_to_object_conversion(strict, None, None if desired_type is JOKER else desired_type): return False, None else: return super(MultifileObjectParser, self...
Explicitly declare that we are not able to parse collections :param desired_type: :param desired_ext: :param strict: :return:
def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any], logger: Logger) -> Dict[str, Any]: if is_collection(desired_type, strict=True): # if the destination type is 'strictly a collection' (not...
Simply inspects the required type to find the names and types of its constructor arguments. Then relies on the inner ParserFinder to parse each of them. :param obj_on_fs: :param desired_type: :param logger: :return:
def parsyfiles_global_config(multiple_errors_tb_limit: int = None, full_paths_in_logs: bool = None, dict_to_object_subclass_limit: int = None): if multiple_errors_tb_limit is not None: GLOBAL_CONFIG.multiple_errors_tb_limit = multiple_errors_tb_limit if full_paths_in_l...
This is the method you should use to configure the parsyfiles library :param multiple_errors_tb_limit: the traceback size (default is 3) of individual parsers exceptions displayed when parsyfiles tries several parsing chains and all of them fail. :param full_paths_in_logs: if True, full file paths will be ...
def is_valid(self, context):
    """Verify that every required action has already been executed.

    :param context: execution context exposing ``executed_actions``
    :raises RequirementMissingError: if a required action has not run yet
    :return: True when all requirements are satisfied
    """
    if self.requires:
        for requirement in self.requires:
            # idiomatic membership test (was the non-idiomatic `not r in ...`)
            if requirement not in context.executed_actions:
                raise RequirementMissingError("Action '%s' requires '%s'" % (self.name, requirement))
    return True
Checks through the previous_actions iterable if required actions have been executed
def get_file_contents(file_path):
    """Return the text contents of *file_path*, resolved relative to ``package_dir``.

    :param file_path: path relative to the package directory
    :return: the file's contents as a string
    """
    full_path = os.path.join(package_dir, file_path)
    # context manager closes the handle deterministically
    # (the original left it to the garbage collector)
    with open(full_path, 'r') as handle:
        return handle.read()
Get the context of the file using full path name
def refresh(self): # new_device = {} if self.type in CONST.BINARY_SENSOR_TYPES: response = self._lupusec.get_sensors() for device in response: if device['device_id'] == self._device_id: self.update(device) return device ...
Refresh a device
def update(self, json_state):
    """Merge new state from *json_state* into the cached device state.

    Binary sensors only track their 'status'; every other device type
    updates only the keys it already holds a truthy value for.
    """
    if self._type in CONST.BINARY_SENSOR_TYPES:
        self._json_state['status'] = json_state['status']
        return
    refreshed = {}
    for key in json_state:
        if self._json_state.get(key):
            refreshed[key] = json_state[key]
    self._json_state.update(refreshed)
Update the json data from a dictionary. Only updates if it already exists in the device.
def desc(self):
    """Return a one-line human-readable summary of the device."""
    template = '{0} (ID: {1}) - {2} - {3}'
    return template.format(self.name, self.device_id, self.type, self.status)
Get a short description of the device.
def list(declared, undeclared): queues = current_queues.queues.values() if declared: queues = filter(lambda queue: queue.exists, queues) elif undeclared: queues = filter(lambda queue: not queue.exists, queues) queue_names = [queue.routing_key for queue in queues] queue_names.sor...
List configured queues.
def declare(queues):
    """Declare the given queues (all configured queues when *queues* is empty)."""
    current_queues.declare(queues=queues)
    affected = queues or current_queues.queues.keys()
    message = 'Queues {} have been declared.'.format(affected)
    click.secho(message, fg='green')
Initialize the given queues.
def purge_queues(queues=None):
    """Purge the given queues (all configured queues when *queues* is falsy)."""
    current_queues.purge(queues=queues)
    affected = queues or current_queues.queues.keys()
    message = 'Queues {} have been purged.'.format(affected)
    click.secho(message, fg='green')
Purge the given queues.
def delete_queue(queues):
    """Delete the given queues (all configured queues when *queues* is empty)."""
    current_queues.delete(queues=queues)
    affected = queues or current_queues.queues.keys()
    message = 'Queues {} have been deleted.'.format(affected)
    click.secho(message, fg='green')
Delete the given queues.
def find_needed_formatter(input_format, output_format): #Only take the formatters in the registry selected_registry = [re.cls for re in registry if re.category==RegistryCategories.formatters] needed_formatters = [] for formatter in selected_registry: #Initialize the formatter (needed so it ...
Find a data formatter given an input and output format input_format - needed input format. see utils.input.dataformats output_format - needed output format. see utils.input.dataformats
def find_needed_input(input_format):
    """Locate the registered input class for *input_format*.

    input_format -- needed input format, see utils.input.dataformats
    Returns the first matching class, or None when nothing matches.
    """
    candidates = (entry.cls for entry in registry
                  if entry.category == RegistryCategories.inputs
                  and entry.cls.input_format == input_format)
    return next(candidates, None)
Find a needed input class input_format - needed input format, see utils.input.dataformats
def exists_in_registry(category, namespace, name):
    """Return True when a module with this category/namespace/name is registered.

    category -- see registrycategories; type of module
    namespace -- namespace of the module, defined in settings
    name -- the lowercase name of the module
    """
    return any(entry.category == category
               and entry.namespace == namespace
               and entry.name == name
               for entry in registry)
See if a given category, namespace, name combination exists in the registry category - See registrycategories. Type of module namespace - Namespace of the module, defined in settings name - the lowercase name of the module
def register(cls): registry_entry = RegistryEntry(category = cls.category, namespace = cls.namespace, name = cls.name, cls=cls) if registry_entry not in registry and not exists_in_registry(cls.category, cls.namespace, cls.name): registry.append(registry_entry) else: log.warn("Class {0} ...
Register a given model in the registry
def _set_fields(self): self.fields = [] self.required_input = [] for member_name, member_object in inspect.getmembers(self.__class__): if inspect.isdatadescriptor(member_object) and not member_name.startswith("__"): self.fields.append(member_name) ...
Initialize the fields for data caching.
def subscriber(address, topics, callback, message_type):
    """Create a Subscriber bound to *address* and subscribed to *topics*.

    The callback is invoked for every message received.
    """
    return Subscriber(address, topics, callback, message_type)
Creates a subscriber binding to the given address and subscribe the given topics. The callback is invoked for every message received. Args: - address: the address to bind the PUB socket to. - topics: the topics to subscribe - callback: the callback to invoke for eve...
def start(self):
    """Launch a background thread that consumes messages and invokes the callback."""
    consumer_thread = threading.Thread(target=self._consume)
    consumer_thread.start()
Start a thread that consumes the messages and invokes the callback
def _get_forecast(api_result: dict) -> List[SmhiForecast]: forecasts = [] # Need the ordered dict to get # the days in order in next stage forecasts_ordered = OrderedDict() forecasts_ordered = _get_all_forecast_from_api(api_result) # Used to calc the daycount day_nr = 1 ...
Converts results from the API into a list of SmhiForecast objects
def get_forecast_api(self, longitude: str, latitude: str) -> {}:
    """Fetch the raw forecast JSON for the given coordinates from the API.

    :param longitude: longitude string, substituted into APIURL_TEMPLATE
    :param latitude: latitude string
    :return: the decoded JSON payload as a dict
    """
    api_url = APIURL_TEMPLATE.format(longitude, latitude)
    # close the HTTP response deterministically (the original leaked it)
    with urlopen(api_url) as response:
        data = response.read().decode('utf-8')
    return json.loads(data)
gets data from API
async def async_get_forecast_api(self, longitude: str, latitude: str) -> {}: api_url = APIURL_TEMPLATE.format(longitude, latitude) if self.session is None: self.session = aiohttp.ClientSession() async with self.session.get(api_u...
Gets data from the API asynchronously
def get_forecast(self) -> List[SmhiForecast]:
    """Return the list of forecasts; the first entry is the current one."""
    raw_json = self._api.get_forecast_api(self._longitude, self._latitude)
    return _get_forecast(raw_json)
Returns a list of forecasts. The first one in the list is the current one.
async def async_get_forecast(self) -> List[SmhiForecast]:
    """Return the list of forecasts; the first entry is the current one."""
    raw_json = await self._api.async_get_forecast_api(self._longitude, self._latitude)
    return _get_forecast(raw_json)
Returns a list of forecasts. The first one in the list is the current one.
def _make_decorator(measuring_func): def _decorator(name = None, metric = call_default): def wrapper(func): name_ = name if name is not None else func.__module__ + '.' +func.__name__ class instrument_decorator(object): # must be a class for descriptor magic to work ...
morass of closures for making decorators/descriptors
def all(iterable=None, *, name=None, metric=call_default):
    """Measure total time and item count for consuming an iterable.

    :arg iterable: any iterable (None when used as a decorator)
    :arg function metric: f(name, count, total_time)
    :arg str name: name for the metric
    """
    decorating = iterable is None
    return _iter_decorator(name, metric) if decorating else _do_all(iterable, name, metric)
Measure total time and item count for consuming an iterable :arg iterable: any iterable :arg function metric: f(name, count, total_time) :arg str name: name for the metric
def each(iterable=None, *, name=None, metric=call_default):
    """Measure time elapsed to produce each item of an iterable.

    :arg iterable: any iterable (None when used as a decorator)
    :arg function metric: f(name, 1, time)
    :arg str name: name for the metric
    """
    decorating = iterable is None
    return _each_decorator(name, metric) if decorating else _do_each(iterable, name, metric)
Measure time elapsed to produce each item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric
def first(iterable=None, *, name=None, metric=call_default):
    """Measure time elapsed to produce the first item of an iterable.

    :arg iterable: any iterable (None when used as a decorator)
    :arg function metric: f(name, 1, time)
    :arg str name: name for the metric
    """
    decorating = iterable is None
    return _first_decorator(name, metric) if decorating else _do_first(iterable, name, metric)
Measure time elapsed to produce first item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric
def _iterable_to_varargs_method(func):
    """Adapt a method taking an iterable so it can be called with *args instead."""
    def adapter(self, *args, **kwargs):
        return func(self, args, **kwargs)
    return adapter
decorator to convert a method taking a iterable to a *args one
def _varargs_to_iterable_method(func):
    """Adapt a *args method so it can be called with a single iterable instead."""
    def adapter(self, iterable, **kwargs):
        return func(self, *iterable, **kwargs)
    return adapter
decorator to convert a *args method to one taking a iterable
def producer(*, name = None, metric = call_default): def wrapper(func): def instrumenter(name_, *args, **kwargs): t = time.time() try: ret = func(*args, **kwargs) except Exception: # record a metric for other exceptions, than raise ...
Decorator to measure a function that produces many items. The function should return an object that supports ``__len__`` (ie, a list). If the function returns an iterator, use :func:`all` instead. :arg function metric: f(name, count, total_time) :arg str name: name for the metric
def block(*, name=None, metric=call_default, count=1):
    """Context-manager body measuring the wall time of the enclosed block.

    :arg function metric: f(name, count, time)
    :arg str name: name for the metric
    :arg int count: user-supplied number of items, defaults to 1
    """
    started = time.time()
    try:
        yield
    finally:
        elapsed = time.time() - started
        metric(name, count, elapsed)
Context manager to measure execution time of a block :arg function metric: f(name, 1, time) :arg str name: name for the metric :arg int count: user-supplied number of items, defaults to 1
def import_from_string(import_string):
    """Resolve and return a class from a dotted import path.

    import_string -- dotted path to the class, e.g. "foo.bar.Baz"
    """
    module_path, _, class_name = import_string.rpartition(".")
    module = __import__(module_path, fromlist=[class_name])
    return getattr(module, class_name)
Import a class from a string import_string - string path to module to import using dot notation (foo.bar)
def send(self,message,message_type,topic=''): if message_type == RAW: self._sock.send(message) elif message_type == PYOBJ: self._sock.send_pyobj(message) elif message_type == JSON: self._sock.send_json(message) elif message_type == MULTIPART: ...
Send the message on the socket. Args: - message: the message to publish - message_type: the type of message being sent - topic: the topic on which to send the message. Defaults to ''.
def receive(self,message_type): topic = None message = None if message_type == RAW: message = self._sock.recv(flags=zmq.NOBLOCK) elif message_type == PYOBJ: message = self._sock.recv_pyobj(flags=zmq.NOBLOCK) elif message_type == JSON: ...
Receive a message of the specified type and return it. Args: - message_type: the type of the message to receive Returns: - the topic of the message - the message received from the socket
def __get_subscript(self, name, ctx=None):
    """Build the AST for ``<data_var>["<name>"]`` with the given expression context."""
    assert isinstance(name, string_types), name
    index = ast.Index(value=ast.Str(s=name))
    container = ast.Name(id=self.data_var, ctx=ast.Load())
    return ast.Subscript(value=container, slice=index, ctx=ctx)
Returns `<data_var>["<name>"]`
def __get_subscript_assign(self, name):
    """Build the AST for ``<data_var>["<name>"] = <name>``."""
    target = self.__get_subscript(name, ast.Store())
    source = ast.Name(id=name, ctx=ast.Load())
    return ast.Assign(targets=[target], value=source)
Returns `<data_var>["<name>"] = <name>`.
def __get_subscript_delete(self, name):
    """Build the AST for ``del <data_var>["<name>"]``."""
    subscript = self.__get_subscript(name, ast.Del())
    return ast.Delete(targets=[subscript])
Returns `del <data_var>["<name>"]`.
def __visit_target(self, node):
    """Record local variables found in assignment targets.

    Recurses into tuple/list targets; used by #visit_Assign() and
    #__visit_comprehension().
    """
    if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
        self.__add_variable(node.id)
    elif isinstance(node, (ast.Tuple, ast.List)):
        # plain loop instead of a side-effect list comprehension
        for element in node.elts:
            self.__visit_target(element)
Call this method to visit assignment targets and to add local variables to the current stack frame. Used in #visit_Assign() and #__visit_comprehension().
def __get_package_manager(self): package_manager = "" args = "" sudo_required = True if system.is_osx(): package_manager = "brew" sudo_required = False args = " install" elif system.is_debian(): package_manager = "apt-get" ...
Installs and verifies package manager
def eval_expr(expr, context): if isinstance(expr, list): rv = [] for item in expr: rv.append(eval_expr(item, context)) return rv if isinstance(expr, dict): rv = {} for k, v in expr.iteritems(): rv[k] = eval_expr(v, context) kwargs = rv...
Recursively evaluates a compiled expression using the specified context. Dict instances can contain a "__kwargs" key which will be used to update the dict with its content
def can_convert(strict: bool, from_type: Type[S], to_type: Type[T]):
    """Return True when *to_type* is a primitive type or a Joker (None).

    None is treated as a Joker here (but from_type and to_type are never
    both None at the same time).

    :param strict:
    :param from_type:
    :param to_type:
    :return:
    """
    primitives = all_primitive_types + all_np_primitive_types
    return to_type is None or to_type in primitives
None should be treated as a Joker here (but we know that never from_type and to_type will be None at the same time) :param strict: :param from_type: :param to_type: :return:
def parse(self, data, doctype): ''' Parse an input string, and return an AST doctype must have WCADocument as a baseclass ''' self.doctype = doctype self.lexer.lineno = 0 del self.errors[:] del self.warnings[:] self.lexer.lexerror = False a...
Parse an input string, and return an AST doctype must have WCADocument as a baseclass
def _act_on_list(self, lhs): ''' Act on the following rule : items : items item | item ''' lhs[0] = [] if len(lhs) == 3: lhs[0] = lhs[1] # lhs[len(lhs)-1] may be different from lhs[-1] # Yacc use some internal method to ge...
Act on the following rule : items : items item | item
def p_content(self, content): '''content : TITLE opttexts VERSION opttexts sections | TITLE STATESTAG VERSION opttexts states_sections''' content[0] = self.doctype(content[1], content[3], content[4], content[5]) if self.toc: self.toc.set_articles([a for a in conten...
content : TITLE opttexts VERSION opttexts sections | TITLE STATESTAG VERSION opttexts states_sections
def p_text(self, text): '''text : TEXT PARBREAK | TEXT | PARBREAK''' item = text[1] text[0] = item if item[0] != "\n" else u"" if len(text) > 2: text[0] += "\nf p_text(self, text): '''text : TEXT PARBREAK | TEXT ...
text : TEXT PARBREAK | TEXT | PARBREAK
def p_toc(self, toc): '''toc : HEADERSEC opttexts TOC opttexts''' toc[0] = TableOfContent(toc[1], toc[2], []) self.toc = toc[0f p_toc(self, toc): '''toc : HEADERSEC opttexts TOC opttexts''' toc[0] = TableOfContent(toc[1], toc[2], []) self.toc = toc[0]
toc : HEADERSEC opttexts TOC opttexts
def p_article(self, article): '''article : ARTICLEHEADER opttexts rules opttexts''' article[0] = Article(article[1][4], article[2], article[3], article[1][0], article[1][1], article[1][2], article[1][3], article[1][5]f p_article(self, article): '''article : ARTICLEHE...
article : ARTICLEHEADER opttexts rules opttexts
def p_regularsec(self, regularsec): '''regularsec : HEADERSEC opttexts optsubsections''' texts = [] sections = regularsec[2] if len(regularsec) > 3: texts = regularsec[2] sections = regularsec[3] regularsec[0] = Section(regularsec[1], texts, sectionsf p_re...
regularsec : HEADERSEC opttexts optsubsections
def p_subsection(self, subsection): '''subsection : HEADERSUBSEC texts | HEADERSUBSEC texts labeldecls opttexts''' content = subsection[3] if len(subsection) > 3 else [] subsection[0] = Subsection(subsection[1], subsection[2], contentf p_subsection(self, subsection): ...
subsection : HEADERSUBSEC texts | HEADERSUBSEC texts labeldecls opttexts
def p_state(self, state): '''state : STATE opttexts''' state[0] = State(state[1][0], state[1][1], state[1][2], state[1][3], state[2]f p_state(self, state): '''state : STATE opttexts''' state[0] = State(state[1][0], state[1][1], state[1][2], state[1][3], state[2])
state : STATE opttexts
def p_error(self, elem): '''Handle syntax error''' self.errors.append("Syntax error on line " + str(self.lexer.lineno) + ". Got unexpected token " + elem.typef p_error(self, elem): '''Handle syntax error''' self.errors.append("Syntax error on line " + str(self....
Handle syntax error
def set_progress_brackets(self, start, end):
    """Set the bracket characters drawn around a progress bar."""
    self.sep_start, self.sep_end = start, end
Set brackets to set around a progress bar.
def add_progress(self, count, symbol='#', color=None, on_color=None, attrs=None):
    """Append a progress section to the bar.

    The section's width is proportional to *count* over the total progress,
    drawn with *symbol* in the given foreground/background colors.
    """
    new_chunk = _ProgressChunk(count, symbol, color, on_color, attrs)
    self._progress_chunks.append(new_chunk)
Add a section of progress to the progressbar. The progress is captured by "count" and displayed as a fraction of the statusbar width proportional to this count over the total progress displayed. The progress will be displayed using the "symbol" character and the foreground and backgroun...
def format_progress(self, width): chunk_widths = self._get_chunk_sizes(width) progress_chunks = [chunk.format_chunk(chunk_width) for (chunk, chunk_width) in zip(self._progress_chunks, chunk_widths)] return "{sep_start}{progress}{sep_...
Create the formatted string that displays the progress.
def summary_width(self):
    """Return how many characters the summary string needs.

    This is computed from digit counts, not from the formatted string,
    since the latter might contain ANSI codes.
    """
    counts = [chunk.count for chunk in self._progress_chunks]
    digits = sum(max(1, ceil(log10(c + 1))) for c in counts)
    separators = len(counts) - 1
    return digits + separators
Calculate how long a string is needed to show a summary string. This is not simply the length of the formatted summary string since that string might contain ANSI codes.
def format_summary(self):
    """Return the '/'-joined summary string for all progress chunks."""
    return "/".join(chunk.format_chunk_summary()
                    for chunk in self._progress_chunks)
Generate a summary string for the progress bar.
def add_progress(self, count, symbol='#', color=None, on_color=None, attrs=None):
    """Delegate a new progress section to the underlying progress tracker.

    The section is *count* wide relative to the total, drawn with *symbol*
    in the given foreground/background colors.
    """
    self._progress.add_progress(count, symbol,
                                color=color, on_color=on_color, attrs=attrs)
Add a section of progress to the progressbar. The progress is captured by "count" and displayed as a fraction of the statusbar width proportional to this count over the total progress displayed. The progress will be displayed using the "symbol" character and the foreground and backgroun...
def format_status(self, width=None, label_width=None, progress_width=None, summary_width=None): if width is None: # pragma: no cover width = shutil.get_terminal_size()[0] if label_width is None: label_wi...
Generate the formatted status bar string.
def add_status_line(self, label):
    """Append a status bar line to the table and return it.

    The returned StatusBar can be further configured by the caller.
    """
    line = StatusBar(label, self._sep_start, self._sep_end, self._fill_char)
    self._lines.append(line)
    return line
Add a status bar line to the table. This function returns the status bar and it can be modified from this return value.
def calculate_field_widths(self, width=None, min_label_width=10, min_progress_width=10): if width is None: # pragma: no cover width = shutil.get_terminal_size()[0] summary_width = self.summary_width() label_widt...
Calculate how wide each field should be so we can align them. We always find room for the summaries since these are short and packed with information. If possible, we will also find room for labels, but if this would make the progress bar width shorter than the specified minium then we ...
def format_table(self, width=None, min_label_width=10, min_progress_width=10): # handle the special case of an empty table. if len(self._lines) == 0: return [] if width is None: # pragma: no cover width = shutil.get_terminal_size()[0] ...
Format the entire table of progress bars. The function first computes the widths of the fields so they can be aligned across lines and then returns formatted lines as a list of strings.
def create_log_dict(request, response): remote_addr = request.META.get('REMOTE_ADDR') if remote_addr in getattr(settings, 'INTERNAL_IPS', []): remote_addr = request.META.get( 'HTTP_X_FORWARDED_FOR') or remote_addr user_email = "-" if hasattr(request, 'user'): user_email...
Create a dictionary with logging data.
def create_log_message(log_dict, use_sql_info=False, fmt=True): log_msg = ( "%(remote_address)s %(user_email)s %(method)s %(url)s %(status)d " "%(content_length)d (%(request_time).2f seconds)" ) if use_sql_info: sql_time = sum( float(q['time']) for q in connection.qu...
Create the logging message string.
def process_response(self, request, response): try: log_dict = create_log_dict(request, response) # add the request time to the log_dict; if no start time is # available, use -1 as NA value request_time = ( time.time() - self.start_time i...
Create the logging message..
def synchronized(obj): if hasattr(obj, 'synchronizable_condition'): return obj.synchronizable_condition elif callable(obj): @functools.wraps(obj) def wrapper(self, *args, **kwargs): with self.synchronizable_condition: return obj(self, *args, **kwargs) return wrapper else: rai...
This function has two purposes: 1. Decorate a function that automatically synchronizes access to the object passed as the first argument (usually `self`, for member methods) 2. Synchronize access to the object, used in a `with`-statement. Note that you can use #wait(), #notify() and #notify_all() only on ...
def wait(obj, timeout=None):
    """Wait until *obj* gets notified via #notify() or #notify_all().

    With a *timeout*, the call may return before notification once the
    time runs out. Only valid on #synchronized() objects.
    """
    condition = obj.synchronizable_condition
    if timeout is None:
        return condition.wait()
    return condition.wait(timeout)
Wait until *obj* gets notified with #notify() or #notify_all(). If a timeout is specified, the function can return without the object being notified if the time runs out. Note that you can only use this function on #synchronized() objects. # Arguments obj (Synchronizable): An object that can be synchronized...
def wait_for_condition(obj, cond, timeout=None): with synchronized(obj): if timeout is None: while not cond(obj): wait(obj) else: t_start = time.time() while not cond(obj): t_delta = time.time() - t_start if t_delta >= timeout: return False wait(...
This is an extended version of #wait() that applies the function *cond* to check for a condition to break free from waiting on *obj*. Note that *obj* must be notified when its state changes in order to check the condition. Note that access to *obj* is synchronized when *cond* is called. # Arguments obj (Sync...
def as_completed(jobs): ''' Generator function that yields the jobs in order of their completion. Attaches a new listener to each job. ''' jobs = tuple(jobs) event = threading.Event() callback = lambda f, ev: event.set() [job.add_listener(Job.SUCCESS, callback, once=True) for job in jobs] [job.add_listen...
Generator function that yields the jobs in order of their completion. Attaches a new listener to each job.