docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Add value to a set in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to set in dictionary Returns: None
def dict_of_sets_add(dictionary, key, value):
    # type: (DictUpperBound, Any, Any) -> None
    """Add value to a set in a dictionary by key.

    Args:
        dictionary (DictUpperBound): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to add to set in dictionary

    Returns:
        None
    """
    # setdefault creates the empty set only when key is absent, replacing
    # the get / add / reassign dance with a single idiomatic call
    dictionary.setdefault(key, set()).add(value)
741,108
Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list
def list_distribute_contents_simple(input_list, function=lambda x: x):
    # type: (List, Callable[[Any], Any]) -> List
    """Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1].

    List can contain complex types like dictionaries in which case the function
    can return the appropriate value eg. lambda x: x[KEY]

    Args:
        input_list (List): List to distribute values
        function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x.

    Returns:
        List: Distributed list
    """
    # Group items into piles keyed by function(item)
    grouped = dict()
    for item in input_list:
        dict_of_lists_add(grouped, function(item), item)
    # Emit one item per pile per round, in sorted key order, until all piles drain
    result = list()
    position = 0
    while True:
        appended = False
        for group_key in sorted(grouped):
            pile = grouped[group_key]
            if position < len(pile):
                result.append(pile[position])
                appended = True
        if not appended:
            break
        position += 1
    return result
741,109
Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 1, 2, 1, 3]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list
def list_distribute_contents(input_list, function=lambda x: x):
    # type: (List, Callable[[Any], Any]) -> List
    """Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 1, 2, 1, 3].

    List can contain complex types like dictionaries in which case the function
    can return the appropriate value eg. lambda x: x[KEY]

    Args:
        input_list (List): List to distribute values
        function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x.

    Returns:
        List: Distributed list
    """
    def riffle_shuffle(piles_list):
        # Interleave piles of unequal length, largest pile first, so that
        # items from different piles alternate as evenly as possible.
        def grouper(n, iterable, fillvalue=None):
            # Collect data into fixed-length chunks, padding with fillvalue
            args = [iter(iterable)] * n
            return zip_longest(fillvalue=fillvalue, *args)
        if not piles_list:
            return []
        piles_list.sort(key=len, reverse=True)
        width = len(piles_list[0])
        pile_iters_list = [iter(pile) for pile in piles_list]
        # Each item is replaced by the index of the pile it came from
        pile_sizes_list = [[pile_position] * len(pile) for pile_position, pile in enumerate(piles_list)]
        # Rows of pile indices, then transposed so columns walk across piles
        grouped_rows = grouper(width, itertools.chain.from_iterable(pile_sizes_list))
        grouped_columns = zip_longest(*grouped_rows)
        # Pull the next real item from whichever pile each index points at;
        # None entries are zip_longest padding and are skipped
        shuffled_pile = [next(pile_iters_list[position]) for position in itertools.chain.from_iterable(grouped_columns) if position is not None]
        return shuffled_pile
    # Group items into piles keyed by function(item), then riffle-shuffle
    dictionary = dict()
    for obj in input_list:
        dict_of_lists_add(dictionary, function(obj), obj)
    intermediate_list = list()
    for key in sorted(dictionary):
        intermediate_list.append(dictionary[key])
    return riffle_shuffle(intermediate_list)
741,110
Extract a list by looking up key in each member of a list of dictionaries Args: list_of_dict (List[DictUpperBound]): List of dictionaries key (Any): Key to find in each dictionary Returns: List: List containing values returned from each dictionary
def extract_list_from_list_of_dict(list_of_dict, key):
    # type: (List[DictUpperBound], Any) -> List
    """Extract a list by looking up key in each member of a list of dictionaries.

    Args:
        list_of_dict (List[DictUpperBound]): List of dictionaries
        key (Any): Key to find in each dictionary

    Returns:
        List: List containing values returned from each dictionary
    """
    # Comprehension replaces the manual append loop; KeyError still raised
    # if any dictionary lacks the key, same as the original lookup
    return [dictionary[key] for dictionary in list_of_dict]
741,111
Convert keys of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with keys converted to integers
def integer_key_convert(dictin, dropfailedkeys=False):
    # type: (DictUpperBound, bool) -> Dict
    """Convert keys of dictionary to integers.

    Args:
        dictin (DictUpperBound): Input dictionary
        dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.

    Returns:
        Dict: Dictionary with keys converted to integers
    """
    # Delegate to the generic converter with int as the key function
    return key_value_convert(dictin, dropfailedkeys=dropfailedkeys, keyfn=int)
741,113
Convert values of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False. Returns: Dict: Dictionary with values converted to integers
def integer_value_convert(dictin, dropfailedvalues=False):
    # type: (DictUpperBound, bool) -> Dict
    """Convert values of dictionary to integers.

    Args:
        dictin (DictUpperBound): Input dictionary
        dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False.

    Returns:
        Dict: Dictionary with values converted to integers
    """
    # Delegate to the generic converter with int as the value function
    return key_value_convert(dictin, dropfailedvalues=dropfailedvalues, valuefn=int)
741,114
Convert values of dictionary to floats Args: dictin (DictUpperBound): Input dictionary dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False. Returns: Dict: Dictionary with values converted to floats
def float_value_convert(dictin, dropfailedvalues=False):
    # type: (DictUpperBound, bool) -> Dict
    """Convert values of dictionary to floats.

    Args:
        dictin (DictUpperBound): Input dictionary
        dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False.

    Returns:
        Dict: Dictionary with values converted to floats
    """
    # Delegate to the generic converter with float as the value function
    return key_value_convert(dictin, dropfailedvalues=dropfailedvalues, valuefn=float)
741,115
Create a new dictionary from two dictionaries by averaging values Args: dictin1 (DictUpperBound): First input dictionary dictin2 (DictUpperBound): Second input dictionary dropmissing (bool): Whether to drop keys missing in one dictionary. Defaults to True. Returns: Dict: Dictionary with values being average of 2 input dictionaries
def avg_dicts(dictin1, dictin2, dropmissing=True):
    # type: (DictUpperBound, DictUpperBound, bool) -> Dict
    """Create a new dictionary from two dictionaries by averaging values.

    Args:
        dictin1 (DictUpperBound): First input dictionary
        dictin2 (DictUpperBound): Second input dictionary
        dropmissing (bool): Whether to drop keys missing in one dictionary. Defaults to True.

    Returns:
        Dict: Dictionary with values being average of 2 input dictionaries
    """
    averaged = dict()
    # Average keys present in both; optionally keep keys only in dictin1
    for key, value in dictin1.items():
        if key in dictin2:
            averaged[key] = (value + dictin2[key]) / 2
        elif not dropmissing:
            averaged[key] = value
    # Optionally keep keys only in dictin2
    if not dropmissing:
        for key, value in dictin2.items():
            if key not in dictin1:
                averaged[key] = value
    return averaged
741,116
Convert command line arguments in a comma separated string to a dictionary Args: args (str): Command line arguments Returns: DictUpperBound[str,str]: Dictionary of arguments
def args_to_dict(args):
    # type: (str) -> DictUpperBound[str,str]
    """Convert command line arguments in a comma separated string to a dictionary.

    Args:
        args (str): Command line arguments of the form key1=value1,key2=value2

    Returns:
        DictUpperBound[str,str]: Dictionary of arguments
    """
    arguments = dict()
    for arg in args.split(','):
        # maxsplit=1 so values may themselves contain '=' characters;
        # the original unbounded split raised ValueError on such input
        key, value = arg.split('=', 1)
        arguments[key] = value
    return arguments
741,119
Returns the delta between two files using -, ?, + format excluding lines that are the same Args: path1 (str): Path to first file path2 (str): Path to second file Returns: List[str]: Delta between the two files
def compare_files(path1, path2):
    # type: (str, str) -> List[str]
    """Returns the delta between two files using -, ?, + format excluding lines that are the same.

    Args:
        path1 (str): Path to first file
        path2 (str): Path to second file

    Returns:
        List[str]: Delta between the two files
    """
    # Context managers close the file handles deterministically; the
    # original left both files open until garbage collection
    with open(path1) as file1:
        lines1 = file1.readlines()
    with open(path2) as file2:
        lines2 = file2.readlines()
    diff = difflib.ndiff(lines1, lines2)
    return [x for x in diff if x[0] in ('-', '+', '?')]
741,150
Asserts that two files are the same and returns delta using -, ?, + format if not Args: path1 (str): Path to first file path2 (str): Path to second file Returns: None
def assert_files_same(path1, path2):
    # type: (str, str) -> None
    """Asserts that two files are the same and returns delta using -, ?, + format if not.

    Args:
        path1 (str): Path to first file
        path2 (str): Path to second file

    Returns:
        None
    """
    delta = compare_files(path1, path2)
    # The joined delta becomes the assertion message on failure
    assert not delta, ''.join(['\n'] + delta)
741,151
Overwrite keyword arguments with environment variables Args: **kwargs: See below user_agent (str): User agent string. Returns: kwargs: Changed keyword arguments
def _environment_variables(**kwargs):
    # type: (Any) -> Any
    """Overwrite keyword arguments with environment variables.

    Args:
        **kwargs: See below
        user_agent (str): User agent string.

    Returns:
        kwargs: Changed keyword arguments
    """
    # Each environment variable, when set, overrides its keyword argument
    for env_name, kwarg_name in (('USER_AGENT', 'user_agent'), ('PREPREFIX', 'preprefix')):
        env_value = os.getenv(env_name)
        if env_value is not None:
            kwargs[kwarg_name] = env_value
    return kwargs
741,214
Construct user agent Args: configdict (Dict): Additional configuration for user agent prefix (str): Text to put at start of user agent ua (str): Custom user agent text Returns: str: Full user agent string
def _construct(configdict, prefix, ua):
    # type: (Dict, str, str) -> str
    """Construct user agent.

    Args:
        configdict (Dict): Additional configuration for user agent
        prefix (str): Text to put at start of user agent
        ua (str): Custom user agent text

    Returns:
        str: Full user agent string
    """
    if not ua:
        raise UserAgentError("User_agent parameter missing. It can be your project's name for example.")
    # Assemble optional preprefix and prefix pieces, then the custom text
    parts = list()
    preprefix = configdict.get('preprefix')
    if preprefix:
        parts.append('%s:' % preprefix)
    if prefix:
        parts.append('%s-' % prefix)
    parts.append(ua)
    return ''.join(parts)
741,215
Load user agent YAML file Args: prefix (str): Text to put at start of user agent user_agent_config_yaml (str): Path to user agent YAML file user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: user agent
def _load(cls, prefix, user_agent_config_yaml, user_agent_lookup=None):
    # type: (str, str, Optional[str]) -> str
    """Load user agent YAML file.

    Args:
        prefix (str): Text to put at start of user agent
        user_agent_config_yaml (str): Path to user agent YAML file
        user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.

    Returns:
        str: user agent
    """
    # Fall back to the class default config path when none supplied
    if not user_agent_config_yaml:
        user_agent_config_yaml = cls.default_user_agent_config_yaml
        logger.info(
            'No user agent or user agent config file given. Using default user agent config file: %s.' % user_agent_config_yaml)
    if not isfile(user_agent_config_yaml):
        raise UserAgentError(
            "User_agent should be supplied in a YAML config file. It can be your project's name for example.")
    logger.info('Loading user agent config from: %s' % user_agent_config_yaml)
    config = load_yaml(user_agent_config_yaml)
    # An optional lookup key selects a sub-dictionary of the YAML
    if user_agent_lookup:
        config = config.get(user_agent_lookup)
    if not config:
        raise UserAgentError("No user agent information read from: %s" % user_agent_config_yaml)
    return cls._construct(config, prefix, config.get('user_agent'))
741,216
Get full user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string
def _create(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):
    # type: (Optional[str], Optional[str], Optional[str], Any) -> str
    """Get full user agent string.

    Args:
        user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed.
        user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
        user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.

    Returns:
        str: Full user agent string
    """
    # Environment variables take precedence over passed-in values
    kwargs = UserAgent._environment_variables(**kwargs)
    user_agent = kwargs.pop('user_agent', user_agent)
    prefix = kwargs.get('prefix')
    if prefix:
        del kwargs['prefix']
    else:
        prefix = 'HDXPythonUtilities/%s' % get_utils_version()
    # Fall back to the YAML config file when no user agent string given
    if user_agent:
        return cls._construct(kwargs, prefix, user_agent)
    return cls._load(prefix, user_agent_config_yaml, user_agent_lookup)
741,217
Set global user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: None
def set_global(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):
    # type: (Optional[str], Optional[str], Optional[str], Any) -> None
    """Set global user agent string.

    Args:
        user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed.
        user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
        user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.

    Returns:
        None
    """
    # Cache the constructed user agent on the class for later retrieval
    cls.user_agent = cls._create(
        user_agent=user_agent,
        user_agent_config_yaml=user_agent_config_yaml,
        user_agent_lookup=user_agent_lookup,
        **kwargs)
741,218
Save dictionary to YAML file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to YAML file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None
def save_yaml(dictionary, path, pretty=False, sortkeys=False):
    # type: (Dict, str, bool, bool) -> None
    """Save dictionary to YAML file preserving order if it is an OrderedDict.

    Args:
        dictionary (Dict): Python dictionary to save
        path (str): Path to YAML file
        pretty (bool): Whether to pretty print. Defaults to False.
        sortkeys (bool): Whether to sort dictionary keys. Defaults to False.

    Returns:
        None
    """
    if sortkeys:
        # Converting to a plain dict discards any OrderedDict ordering,
        # presumably so the dumper applies its own (sorted) key order —
        # TODO confirm against pyaml/yamlloader dumper behavior
        dictionary = dict(dictionary)
    with open(path, 'w') as f:
        if pretty:
            pyaml.dump(dictionary, f)
        else:
            # yamlloader's ordered CDumper preserves OrderedDict key order
            yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper)
741,251
Save dictionary to JSON file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to JSON file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None
def save_json(dictionary, path, pretty=False, sortkeys=False):
    # type: (Dict, str, bool, bool) -> None
    """Save dictionary to JSON file preserving order if it is an OrderedDict.

    Args:
        dictionary (Dict): Python dictionary to save
        path (str): Path to JSON file
        pretty (bool): Whether to pretty print. Defaults to False.
        sortkeys (bool): Whether to sort dictionary keys. Defaults to False.

    Returns:
        None
    """
    # Pretty mode indents and drops the space after commas
    if pretty:
        indent, separators = 2, (',', ': ')
    else:
        indent, separators = None, (', ', ': ')
    with open(path, 'w') as f:
        json.dump(dictionary, f, indent=indent, sort_keys=sortkeys, separators=separators)
741,252
Load YAML file into an ordered dictionary Args: path (str): Path to YAML file Returns: OrderedDict: Ordered dictionary containing loaded YAML file
def load_yaml(path):
    # type: (str) -> OrderedDict
    """Load YAML file into an ordered dictionary.

    Args:
        path (str): Path to YAML file

    Returns:
        OrderedDict: Ordered dictionary containing loaded YAML file
    """
    with open(path, 'rt') as f:
        # yamlloader's ordered CSafeLoader preserves key order
        yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader)
    if not yamldict:
        raise LoadError('YAML file: %s is empty!' % path)
    return yamldict
741,253
Load JSON file into an ordered dictionary Args: path (str): Path to JSON file Returns: OrderedDict: Ordered dictionary containing loaded JSON file
def load_json(path):
    # type: (str) -> OrderedDict
    """Load JSON file into an ordered dictionary.

    Args:
        path (str): Path to JSON file

    Returns:
        OrderedDict: Ordered dictionary containing loaded JSON file
    """
    with open(path, 'rt') as f:
        # object_pairs_hook preserves the key order found in the file
        jsondict = json.load(f, object_pairs_hook=OrderedDict)
    if not jsondict:
        raise LoadError('JSON file: %s is empty!' % path)
    return jsondict
741,254
Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file
def load_file_to_str(path):
    # type: (str) -> str
    """Load file into a string removing newlines.

    Args:
        path (str): Path to file

    Returns:
        str: String contents of file

    Raises:
        LoadError: If the file is empty
    """
    with open(path, 'rt') as f:
        # Text mode normalizes all line endings to '\n' (universal newlines),
        # so replacing '\n' works on every platform. The original replaced
        # os.linesep, which is '\r\n' on Windows and so removed nothing there.
        string = f.read().replace('\n', '')
    if not string:
        raise LoadError('%s file is empty!' % path)
    return string
741,255
Find a function in the given context by name. This function will first search the list of builtins and if the desired function is not a builtin, it will continue to search the given context. Args: context (object): A dict or class that is a typedargs context funname (str): The name of the function to find Returns: callable: The found function.
def find_function(self, context, funname):
    """Find a function in the given context by name.

    Builtins are searched first; if the function is not a builtin the
    given context (dict or object) is searched next.

    Args:
        context (object): A dict or class that is a typedargs context
        funname (str): The name of the function to find

    Returns:
        callable: The found function.
    """
    if funname in self.builtins:
        return self.builtins[funname]
    func = None
    if isinstance(context, dict):
        func = context.get(funname)
        # Strings denote lazily loaded functions; resolve and cache them
        if isinstance(func, str):
            func = self._deferred_add(func)
            context[funname] = func
    elif hasattr(context, funname):
        func = getattr(context, funname)
    if func is None:
        raise NotFoundError("Function not found", function=funname)
    return func
741,400
Return a listing of all of the functions in this context including builtins. Args: context (object): The context to print a directory for. Returns: str
def list_dir(self, context):
    """Return a listing of all of the functions in this context including builtins.

    Args:
        context (object): The context to print a directory for.

    Returns:
        str
    """
    doc = inspect.getdoc(context)
    listing = ""
    listing += "\n"
    listing += annotate.context_name(context) + "\n"
    if doc is not None:
        doc = inspect.cleandoc(doc)
        listing += doc + "\n"
    listing += "\nDefined Functions:\n"
    # Dict contexts list their keys; object contexts are scanned for
    # annotated functions
    is_dict = False
    if isinstance(context, dict):
        funs = context.keys()
        is_dict = True
    else:
        funs = utils.find_all(context)
    for fun in sorted(funs):
        override_name = None
        if is_dict:
            override_name = fun
        fun = self.find_function(context, fun)
        if isinstance(fun, dict):
            # Nested dict entries are subcontexts; show their name only
            if is_dict:
                listing += " - " + override_name + '\n'
            else:
                listing += " - " + fun.metadata.name + '\n'
        else:
            listing += " - " + fun.metadata.signature(name=override_name) + '\n'
        if annotate.short_description(fun) != "":
            listing += " " + annotate.short_description(fun) + '\n'
    listing += "\nBuiltin Functions\n"
    for bif in sorted(self.builtins.keys()):
        listing += ' - ' + bif + '\n'
    listing += '\n'
    return listing
741,401
Parse and invoke a string line. Args: line (str): The line that we want to parse and invoke. Returns: bool: A boolean specifying if the last function created a new context (False if a new context was created) and a list with the remainder of the command line if this function did not consume all arguments.)
def invoke_string(self, line):
    """Parse and invoke a string line.

    Args:
        line (str): The line that we want to parse and invoke.

    Returns:
        bool: A boolean specifying if the last function created a new
            context (False if a new context was created) and a list with
            the remainder of the command line if this function did not
            consume all arguments.)
    """
    # Make sure line is a unicode string on all python versions
    line = str(line)
    # Empty lines and comments are no-ops
    if not line:
        return True
    if line.startswith(u'#'):
        return True
    return self.invoke(self._split_line(line))
741,407
Check if we have enough arguments to call this function. Args: pos_args (list): A list of all the positional values we have. kw_args (dict): A dict of all of the keyword args we have. Returns: bool: True if we have a filled spec, False otherwise.
def spec_filled(self, pos_args, kw_args):
    """Check if we have enough arguments to call this function.

    Args:
        pos_args (list): A list of all the positional values we have.
        kw_args (dict): A dict of all of the keyword args we have.

    Returns:
        bool: True if we have a filled spec, False otherwise.
    """
    # Arguments with defaults sit at the end of arg_names and are optional
    required = self.arg_names
    if len(self.arg_defaults) > 0:
        required = required[:-len(self.arg_defaults)]
    # Required names not covered by keywords must be covered positionally
    missing = [name for name in required if name not in kw_args]
    return len(missing) <= len(pos_args)
741,438
Add type information to the return value of this function. Args: type_name (str): The name of the type of the return value. formatter (str): An optional name of a formatting function specified for the type given in type_name.
def typed_returnvalue(self, type_name, formatter=None):
    """Add type information to the return value of this function.

    Args:
        type_name (str): The name of the type of the return value.
        formatter (str): An optional name of a formatting function
            specified for the type given in type_name.
    """
    # is_data=True, no description — the type system handles formatting
    info = ReturnInfo(type_name, formatter, True, None)
    self.return_info = info
741,440
Use a custom function to print the return value. Args: printer (callable): A function that should take in the return value and convert it to a string. desc (str): An optional description of the return value.
def custom_returnvalue(self, printer, desc=None):
    """Use a custom function to print the return value.

    Args:
        printer (callable): A function that should take in the return
            value and convert it to a string.
        desc (str): An optional description of the return value.
    """
    # No type name — the supplied printer callable does the formatting
    info = ReturnInfo(None, printer, True, desc)
    self.return_info = info
741,441
Try to convert a prefix into a parameter name. If the result could be ambiguous or there is no matching parameter, throw an ArgumentError Args: name (str): A prefix for a parameter name filled_args (list): A list of filled positional arguments that will be removed from consideration. Returns: str: The full matching parameter name
def match_shortname(self, name, filled_args=None):
    """Try to convert a prefix into a parameter name.

    If the result could be ambiguous or there is no matching parameter,
    throw an ArgumentError.

    Args:
        name (str): A prefix for a parameter name
        filled_args (list): A list of filled positional arguments that will
            be removed from consideration.

    Returns:
        str: The full matching parameter name
    """
    filled_count = len(filled_args) if filled_args is not None else 0
    # Only parameters not already filled positionally are candidates
    candidates = [arg for arg in self.arg_names[filled_count:] if arg.startswith(name)]
    if len(candidates) == 0:
        raise ArgumentError("Could not convert short-name full parameter name, none could be found", short_name=name, parameters=self.arg_names)
    if len(candidates) > 1:
        raise ArgumentError("Short-name is ambiguous, could match multiple keyword parameters", short_name=name, possible_matches=candidates)
    return candidates[0]
741,442
Get the parameter type information by name. Args: name (str): The full name of a parameter. Returns: str: The type name or None if no type information is given.
def param_type(self, name):
    """Get the parameter type information by name.

    Args:
        name (str): The full name of a parameter.

    Returns:
        str: The type name or None if no type information is given.
    """
    self._ensure_loaded()
    if name in self.annotated_params:
        return self.annotated_params[name].type_name
    return None
741,443
Return our function signature as a string. By default this function uses the annotated name of the function however if you need to override that with a custom name you can pass name=<custom name> Args: name (str): Optional name to override the default name given in the function signature. Returns: str: The formatted function signature
def signature(self, name=None):
    """Return our function signature as a string.

    By default this function uses the annotated name of the function
    however if you need to override that with a custom name you can pass
    name=<custom name>.

    Args:
        name (str): Optional name to override the default name given in
            the function signature.

    Returns:
        str: The formatted function signature
    """
    self._ensure_loaded()
    if name is None:
        name = self.name
    # Defaulted arguments occupy the tail of arg_names
    defaults_count = len(self.arg_defaults) if self.arg_defaults is not None else 0
    first_default = len(self.arg_names) - defaults_count
    rendered = []
    for index, arg_name in enumerate(self.arg_names):
        prefix = ""
        if arg_name in self.annotated_params:
            prefix = "{} ".format(self.annotated_params[arg_name].type_name)
        if index >= first_default:
            default_repr = str(self.arg_defaults[index - first_default])
            if default_repr == "":
                # Show empty-string defaults explicitly
                default_repr = "''"
            rendered.append("{}{}={}".format(prefix, str(arg_name), default_repr))
        else:
            rendered.append(prefix + str(arg_name))
    return "{}({})".format(name, ", ".join(rendered))
741,444
Format the return value of this function as a string. Args: value (object): The return value that we are supposed to format. Returns: str: The formatted return value, or None if this function indicates that it does not return data
def format_returnvalue(self, value):
    """Format the return value of this function as a string.

    Args:
        value (object): The return value that we are supposed to format.

    Returns:
        str: The formatted return value, or None if this function
            indicates that it does not return data
    """
    self._ensure_loaded()
    info = self.return_info
    # Functions that declare no return data produce None
    if not info.is_data:
        return None
    # Typed return values are formatted via the type system
    if info.type_name is not None:
        return typeinfo.type_system.format_value(value, info.type_name, info.formatter)
    # Otherwise the formatter is a callable that stringifies the value
    return info.formatter(value)
741,445
Convert and validate a positional argument. Args: index (int): The positional index of the argument arg_value (object): The value to convert and validate Returns: object: The converted value.
def convert_positional_argument(self, index, arg_value):
    """Convert and validate a positional argument.

    Args:
        index (int): The positional index of the argument
        arg_value (object): The value to convert and validate

    Returns:
        object: The converted value.
    """
    # Bound methods: position 0 is self and passes through untouched;
    # later positions shift down by one to index arg_names
    if self._has_self:
        if index == 0:
            return arg_value
        index -= 1
    return self.convert_argument(self.arg_names[index], arg_value)
741,446
Given a parameter with type information, convert and validate it. Args: arg_name (str): The name of the argument to convert and validate arg_value (object): The value to convert and validate Returns: object: The converted value.
def convert_argument(self, arg_name, arg_value):
    """Given a parameter with type information, convert and validate it.

    Args:
        arg_name (str): The name of the argument to convert and validate
        arg_value (object): The value to convert and validate

    Returns:
        object: The converted value.

    Raises:
        ValidationError: If a named validator does not exist on the type
            object, or if a validator rejects the converted value.
    """
    self._ensure_loaded()
    type_name = self.param_type(arg_name)
    # Untyped parameters pass through unchanged
    if type_name is None:
        return arg_value
    val = typeinfo.type_system.convert_to_type(arg_value, type_name)
    validators = self.annotated_params[arg_name].validators
    if len(validators) == 0:
        return val
    type_obj = typeinfo.type_system.get_type(type_name)
    # Run all of the validators that were defined for this argument.
    # If the validation fails, they will raise an exception that we convert to
    # an instance of ValidationError
    try:
        for validator_name, extra_args in validators:
            if not hasattr(type_obj, validator_name):
                raise ValidationError("Could not find validator specified for argument", argument=arg_name, validator_name=validator_name, type=str(type_obj), method=dir(type_obj))
            validator = getattr(type_obj, validator_name)
            validator(val, *extra_args)
    except (ValueError, TypeError) as exc:
        raise ValidationError(exc.args[0], argument=arg_name, arg_value=val)
    return val
741,448
Format this exception as a string including class name. Args: exclude_class (bool): Whether to exclude the exception class name when formatting this exception Returns: string: a multiline string with the message, class name and key value parameters passed to create the exception.
def format(self, exclude_class=False):
    """Format this exception as a string including class name.

    Args:
        exclude_class (bool): Whether to exclude the exception class name
            when formatting this exception

    Returns:
        string: a multiline string with the message, class name and key
            value parameters passed to create the exception.
    """
    if exclude_class:
        msg = self.msg
    else:
        msg = "%s: %s" % (self.__class__.__name__, self.msg)
    # Append any key/value parameters as additional information lines
    if len(self.params) != 0:
        details = "\n".join(str(key) + ": " + str(val) for key, val in self.params.items())
        msg += "\nAdditional Information:\n" + details
    return msg
741,450
Find all annotated function inside of a container. Annotated functions are identified as those that: - do not start with a _ character - are either annotated with metadata - or strings that point to lazily loaded modules Args: container (object): The container to search for annotated functions. Returns: dict: A dict with all of the found functions in it.
def find_all(container):
    """Find all annotated function inside of a container.

    Annotated functions are identified as those that:
    - do not start with a _ character
    - are either annotated with metadata
    - or strings that point to lazily loaded modules

    Args:
        container (object): The container to search for annotated functions.

    Returns:
        dict: A dict with all of the found functions in it.
    """
    is_dict = isinstance(container, dict)
    names = container.keys() if is_dict else dir(container)
    built_context = BasicContext()
    for name in names:
        # Ignore private and dunder names
        if name.startswith('_'):
            continue
        obj = container[name] if is_dict else getattr(container, name)
        # In dict contexts, strings point to lazily loaded modules, so include them.
        # Otherwise require a metadata attribute of the exact AnnotatedMetadata
        # type to avoid issues with 'from annotate import *' style imports.
        if is_dict and isinstance(obj, str):
            built_context[name] = obj
        elif hasattr(obj, 'metadata') and isinstance(getattr(obj, 'metadata'), AnnotatedMetadata):
            built_context[name] = obj
    return built_context
741,460
Return usage information about a context or function. For contexts, just return the context name and its docstring For functions, return the function signature as well as its argument types. Args: func (callable): An annotated callable function Returns: str: The formatted help text
def get_help(func):
    """Return usage information about a context or function.

    For contexts, just return the context name and its docstring.
    For functions, return the function signature as well as its
    argument types.

    Args:
        func (callable): An annotated callable function

    Returns:
        str: The formatted help text
    """
    help_text = ""
    # Dict contexts get only their name and docstring
    if isinstance(func, dict):
        name = context_name(func)
        help_text = "\n" + name + "\n\n"
        doc = inspect.getdoc(func)
        if doc is not None:
            doc = inspect.cleandoc(doc)
            help_text += doc + '\n'
        return help_text
    sig = func.metadata.signature()
    doc = inspect.getdoc(func)
    if doc is not None:
        doc = inspect.cleandoc(doc)
    help_text += "\n" + sig + "\n\n"
    if doc is not None:
        help_text += doc + '\n'
    # For classes, the argument metadata lives on the constructor
    if inspect.isclass(func):
        func = func.__init__
    # If we derived the parameter annotations from a docstring,
    # don't insert a custom arguments section since it already
    # exists.
    if func.metadata.load_from_doc:
        return help_text
    help_text += "\nArguments:\n"
    for key, info in func.metadata.annotated_params.items():
        type_name = info.type_name
        desc = ""
        if info.desc is not None:
            desc = info.desc
        help_text += "  - %s (%s): %s\n" % (key, type_name, desc)
    return help_text
741,465
Specify how the return value of this function should be handled. Args: desc (str): A deprecated description of the return value printer (callable): A callable function that can format this return value data (bool): A deprecated parameter for specifying that this function returns data.
def returns(desc=None, printer=None, data=True):
    """Specify how the return value of this function should be handled.

    Args:
        desc (str): A deprecated description of the return value
        printer (callable): A callable function that can format this return value
        data (bool): A deprecated parameter for specifying that this
            function returns data.
    """
    # data=False was a legacy option and is now rejected outright
    if data is False:
        raise ArgumentError("Specifying non data return type in returns is no longer supported")
    def _decorate(func):
        annotated(func)
        func.custom_returnvalue(printer, desc)
        return func
    return _decorate
741,467
Specify that this function returns a typed value. Args: type_name (str): A type name known to the global typedargs type system formatter (str): An optional name of a formatting function specified for the type given in type_name.
def return_type(type_name, formatter=None):
    """Specify that this function returns a typed value.

    Args:
        type_name (str): A type name known to the global typedargs type system
        formatter (str): An optional name of a formatting function specified
            for the type given in type_name.
    """
    def _decorate(func):
        # Ensure metadata exists, then record the typed return value on it
        annotated(func)
        func.metadata.typed_returnvalue(type_name, formatter)
        return func
    return _decorate
741,468
Declare that a class defines a context. Contexts are for use with HierarchicalShell for discovering and using functionality from the command line. Args: name (str): Optional name for this context if you don't want to just use the class name.
def context(name=None):
    """Declare that a class defines a context.

    Contexts are for use with HierarchicalShell for discovering
    and using functionality from the command line.

    Args:
        name (str): Optional name for this context if you don't want
            to just use the class name.
    """
    def _decorate(cls):
        # Annotate the class and flag it as a context
        annotated(cls, name)
        cls.context = True
        return cls
    return _decorate
741,469
Mark a function as callable from the command line. This function is meant to be called as decorator. This function also initializes metadata about the function's arguments that is built up by the param decorator. Args: func (callable): The function that we wish to mark as callable from the command line. name (str): Optional string that will override the function's built-in name.
def annotated(func, name=None):
    """Mark a function as callable from the command line.

    This function is meant to be called as decorator. This function also
    initializes metadata about the function's arguments that is built up
    by the param decorator.

    Args:
        func (callable): The function that we wish to mark as callable
            from the command line.
        name (str): Optional string that will override the function's
            built-in name.
    """
    # Already-annotated functions only get fresh metadata when renamed
    if hasattr(func, 'metadata'):
        if name is not None:
            func.metadata = AnnotatedMetadata(func, name)
        return func
    func.metadata = AnnotatedMetadata(func, name)
    # Default flags for a newly annotated function
    func.finalizer = False
    func.takes_cmdline = False
    func.decorated = False
    func.context = False
    return func
741,471
Core function used to generate the read_time for content. Parameters: :param content: Instance of pelican.content.Content Returns: None
def read_time(self, content):
    """Core function used to generate the read_time for content.

    Parameters:
        :param content: Instance of pelican.content.Content

    Returns:
        None
    """
    if get_class_name(content) in self.content_type_supported:
        # Exit if readtime is already set
        if hasattr(content, 'readtime'):
            return None
        # Fall back to the default language settings when the content's
        # language has no specific configuration
        default_lang_conf = self.lang_settings['default']
        lang_conf = self.lang_settings.get(content.lang, default_lang_conf)
        avg_reading_wpm = lang_conf['wpm']
        # NOTE(review): assumes content._content is whitespace-separated
        # text; markup tags would inflate the word count — confirm upstream
        num_words = len(content._content.split())
        # Floor division so we don't have to convert float -> int
        minutes = num_words // avg_reading_wpm
        # Get seconds to read, then subtract our minutes as seconds from
        # the time to get remainder seconds
        seconds = int((num_words / avg_reading_wpm * 60) - (minutes * 60))
        minutes_str = self.pluralize(
            minutes,
            lang_conf['min_singular'],
            lang_conf['min_plural']
        )
        seconds_str = self.pluralize(
            seconds,
            lang_conf['sec_singular'],
            lang_conf['sec_plural']
        )
        # Attach both numeric and human-readable read times to the content
        content.readtime = minutes
        content.readtime_string = minutes_str
        content.readtime_with_seconds = (minutes, seconds,)
        content.readtime_string_with_seconds = "{}, {}".format(
            minutes_str, seconds_str)
741,926
Returns a string that contains the measure (amount) and its plural or singular form depending on the amount. Parameters: :param measure: Amount, value, always a numerical value :param singular: The singular form of the chosen word :param plural: The plural form of the chosen word Returns: String
def pluralize(self, measure, singular, plural):
    """Return the measure followed by the singular or plural word form.

    Parameters:
        :param measure: Amount, value, always a numerical value
        :param singular: The singular form of the chosen word
        :param plural: The plural form of the chosen word

    Returns:
        String
    """
    # Only exactly 1 takes the singular form (0 is plural)
    word = singular if measure == 1 else plural
    return "{} {}".format(measure, word)
741,927
Turn on or off smartplug Args: device_label (str): Smartplug device label state (boolean): new status, 'True' or 'False'
def set_smartplug_state(self, device_label, state):
    """Turn on or off smartplug.

    Args:
        device_label (str): Smartplug device label
        state (boolean): new status, 'True' or 'False'
    """
    headers = {
        'Content-Type': 'application/json',
        'Cookie': 'vid={}'.format(self._vid)}
    payload = json.dumps([{
        "deviceLabel": device_label,
        "state": state}])
    try:
        response = requests.post(
            urls.smartplug(self._giid), headers=headers, data=payload)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
742,347
Get recent events Args: filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION', 'TECHNICAL', 'SOS', 'WARNING', 'LOCK', 'UNLOCK' pagesize (int): Number of events to display offset (int): Skip pagesize * offset first events
def get_history(self, filters=(), pagesize=15, offset=0):
    """Get recent events.

    Args:
        filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
            'TECHNICAL', 'SOS', 'WARNING', 'LOCK', 'UNLOCK'
        pagesize (int): Number of events to display
        offset (int): Skip pagesize * offset first events
    """
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Cookie': 'vid={}'.format(self._vid)}
    params = {
        "offset": int(offset),
        "pagesize": int(pagesize),
        "notificationCategories": filters}
    try:
        response = requests.get(
            urls.history(self._giid), headers=headers, params=params)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
742,348
Get climate history Args: device_label: device label of climate device
def get_climate(self, device_label):
    """Get climate history.

    Args:
        device_label: device label of climate device
    """
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Cookie': 'vid={}'.format(self._vid)}
    params = {"deviceLabel": device_label}
    try:
        response = requests.get(
            urls.climate(self._giid), headers=headers, params=params)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
742,349
Lock or unlock Args: code (str): Lock code device_label (str): device label of lock state (str): 'lock' or 'unlock'
def set_lock_state(self, code, device_label, state):
    """Lock or unlock.

    Args:
        code (str): Lock code
        device_label (str): device label of lock
        state (str): 'lock' or 'unlock'
    """
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Content-Type': 'application/json',
        'Cookie': 'vid={}'.format(self._vid)}
    payload = json.dumps({"code": str(code)})
    try:
        response = requests.put(
            urls.set_lockstate(self._giid, device_label, state),
            headers=headers, data=payload)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
742,350
Get lock state transaction status Args: transaction_id: Transaction ID received from set_lock_state
def get_lock_state_transaction(self, transaction_id):
    """Get lock state transaction status.

    Args:
        transaction_id: Transaction ID received from set_lock_state
    """
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Cookie': 'vid={}'.format(self._vid)}
    try:
        response = requests.get(
            urls.get_lockstate_transaction(self._giid, transaction_id),
            headers=headers)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
742,351
Get lock configuration Args: device_label (str): device label of lock
def get_lock_config(self, device_label):
    """Get lock configuration.

    Args:
        device_label (str): device label of lock
    """
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Cookie': 'vid={}'.format(self._vid)}
    try:
        response = requests.get(
            urls.lockconfig(self._giid, device_label), headers=headers)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
742,352
Set lock configuration Args: device_label (str): device label of lock volume (str): 'SILENCE', 'LOW' or 'HIGH' voice_level (str): 'ESSENTIAL' or 'NORMAL' auto_lock_enabled (boolean): auto lock enabled
def set_lock_config(self, device_label, volume=None, voice_level=None, auto_lock_enabled=None):
    """Set lock configuration.

    Args:
        device_label (str): device label of lock
        volume (str): 'SILENCE', 'LOW' or 'HIGH'
        voice_level (str): 'ESSENTIAL' or 'NORMAL'
        auto_lock_enabled (boolean): auto lock enabled
    """
    # Only settings explicitly supplied are sent to the device
    settings = {}
    if volume:
        settings['volume'] = volume
    if voice_level:
        settings['voiceLevel'] = voice_level
    if auto_lock_enabled is not None:
        settings['autoLockEnabled'] = auto_lock_enabled
    headers = {
        'Content-Type': 'application/json',
        'Cookie': 'vid={}'.format(self._vid)}
    try:
        response = requests.put(
            urls.lockconfig(self._giid, device_label),
            headers=headers, data=json.dumps(settings))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
742,353
Capture smartcam image Args: device_label (str): device label of camera
def capture_image(self, device_label):
    """Capture smartcam image.

    Args:
        device_label (str): device label of camera
    """
    headers = {
        'Content-Type': 'application/json',
        'Cookie': 'vid={}'.format(self._vid)}
    try:
        response = requests.post(
            urls.imagecapture(self._giid, device_label), headers=headers)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
742,354
Get smartcam image series Args: number_of_imageseries (int): number of image series to get offset (int): skip offset amount of image series
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
    """List smartcam image series.

    Args:
        number_of_imageseries (int): number of image series to fetch.
        offset (int): number of image series to skip.

    Returns:
        dict: decoded JSON list of image series.

    Raises:
        RequestError: if the HTTP request itself fails.
    """
    response = None
    try:
        response = requests.get(
            urls.get_imageseries(self._giid),
            headers={
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Cookie': 'vid={}'.format(self._vid)},
            params={
                "numberOfImageSeries": int(number_of_imageseries),
                "offset": int(offset),
                "fromDate": "",
                "toDate": "",
                "onlyNotViewed": "",
                "_": self._giid})
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
742,355
Download image taken by a smartcam Args: device_label (str): device label of camera image_id (str): image id from image series file_name (str): path to file
def download_image(self, device_label, image_id, file_name):
    """Download a smartcam image to a local file.

    Args:
        device_label (str): device label of the camera.
        image_id (str): image id from an image series.
        file_name (str): destination path.

    Raises:
        RequestError: if the HTTP request itself fails.
    """
    response = None
    try:
        response = requests.get(
            urls.download_image(self._giid, device_label, image_id),
            headers={'Cookie': 'vid={}'.format(self._vid)},
            stream=True)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    # Stream the body in chunks to avoid loading the image into memory.
    with open(file_name, 'wb') as image_file:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                image_file.write(chunk)
742,356
Set heatpump mode Args: mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
def set_heat_pump_mode(self, device_label, mode):
    """Set the heat pump operating mode.

    Args:
        device_label (str): device label of the heat pump.
        mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'.

    Returns:
        dict: decoded JSON response body.

    Raises:
        RequestError: if the HTTP request itself fails.
    """
    response = None
    try:
        response = requests.put(
            urls.set_heatpump_state(self._giid, device_label),
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)},
            data=json.dumps({'mode': mode}))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
742,359
Set heatpump mode Args: feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
def set_heat_pump_feature(self, device_label, feature):
    """Toggle a heat pump feature.

    Args:
        device_label (str): device label of the heat pump.
        feature (str): 'QUIET', 'ECONAVI', or 'POWERFUL'.

    Returns:
        dict: decoded JSON response body.

    Raises:
        RequestError: if the HTTP request itself fails.
    """
    response = None
    try:
        response = requests.put(
            urls.set_heatpump_feature(self._giid, device_label, feature),
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)})
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
742,360
This method rebuilds the tree. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session tree_id (int or str): id of tree Example: * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_rebuild`
def rebuild_tree(cls, session, tree_id):
    """Recompute the MPTT columns (left, right, level) for one tree.

    Args:
        session: SQLAlchemy session used for queries/updates.
        tree_id (int or str): id of the tree to rebuild.

    The tree must have exactly one root (parent_id is None); ``one()``
    raises otherwise.
    """
    # Reset all nested-set columns before recomputing them.
    session.query(cls).filter_by(tree_id=tree_id)\
        .update({cls.left: 0, cls.right: 0, cls.level: 0})
    top = session.query(cls).filter_by(parent_id=None)\
        .filter_by(tree_id=tree_id).one()
    top.left = left = 1
    top.right = right = 2
    top.level = level = cls.get_default_level()

    def recursive(children, left, right, level):
        # Depth-first assignment of left/right bounds for each subtree.
        level = level + 1
        for i, node in enumerate(children):
            # For i == 0 this reads children[-1].right, which is 0 after
            # the reset above and therefore falsy.
            same_level_right = children[i - 1].right
            left = left + 1
            if i > 0:
                left = left + 1
            if same_level_right:
                left = same_level_right + 1
            right = left + 1
            node.left = left
            node.right = right
            # Widen every ancestor's right bound to enclose this node.
            parent = node.parent
            j = 0
            while parent:
                parent.right = right + 1 + j
                parent = parent.parent
                j += 1
            node.level = level
            recursive(node.children, left, right, level)

    recursive(top.children, left, right, level)
743,164
This function rebuilds one or all trees. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session Kwargs: tree_id (int or str): id of tree, default None Example: * :mod:`sqlalchemy_mptt.tests.TestTree.test_rebuild`
def rebuild(cls, session, tree_id=None):
    """Rebuild the nested-set columns for whole trees.

    Args:
        session: SQLAlchemy session used for the queries.
        tree_id (int or str): optional tree id; when given only that tree
            is rebuilt, otherwise every tree (one per root node) is.
    """
    root_query = session.query(cls).filter_by(parent_id=None)
    if tree_id:
        root_query = root_query.filter_by(tree_id=tree_id)
    for root in root_query:
        cls.rebuild_tree(session, root.tree_id)
743,165
qx: Returns the probability that a life aged x dies before 1 year With the convention: the true probability is qx/1000 Args: mt: the mortality table x: the age as integer number.
def qx(mt, x):
    """Return the tabulated death probability q_x for a life aged ``x``.

    By convention the true probability is the returned value divided
    by 1000.  Ages beyond the end of the table yield 0.

    Args:
        mt: mortality table object exposing a ``qx`` sequence.
        x (int): the age.
    """
    table = mt.qx
    return table[x] if x < len(table) else 0
743,172
initializes the layout algorithm by computing roots (unless provided), inverted edges (unless provided), vertices ranks and creates all dummy vertices and layers. Parameters: roots (list[Vertex]): set *root* vertices (layer 0) inverted_edges (list[Edge]): set edges to invert to have a DAG. optimize (bool): optimize ranking if True (default False)
def init_all(self, roots=None, inverted_edges=None, optimize=False):
    """Initialize the layered-layout algorithm: compute roots and inverted
    edges (unless provided), rank all vertices, and create dummy
    vertices/layers.  Idempotent: a second call is a no-op.

    Args:
        roots (list[Vertex]): root vertices (layer 0).
        inverted_edges (list[Edge]): edges to invert to obtain a DAG.
        optimize (bool): optimize ranking if True (default False).
    """
    if self.initdone:
        return
    # For layered sugiyama algorithm, the input graph must be acyclic,
    # so we must provide a list of root nodes and a list of inverted edges.
    if roots == None:
        roots = [v for v in self.g.sV if len(v.e_in()) == 0]
    if inverted_edges == None:
        # get_scs_with_feedback flags feedback edges as a side effect.
        L = self.g.get_scs_with_feedback(roots)
        inverted_edges = [x for x in self.g.sE if x.feedback]
    self.alt_e = inverted_edges
    # assign rank to all vertices:
    self.rank_all(roots, optimize)
    # add dummy vertex/edge for 'long' edges:
    for e in self.g.E():
        self.setdummies(e)
    # precompute some layers values:
    for l in self.layers:
        l.setup(self)
    self.initdone = True
743,253
creates a DummyVertex at rank r inserted in the ctrl dict of the associated edge and layer. Arguments: r (int): rank value ctrl (dict): the edge's control vertices Returns: DummyVertex : the created DummyVertex.
def dummyctrl(self, r, ctrl):
    """Create a DummyVertex at rank ``r``, register it in the edge's
    control dict and in the corresponding layer.

    Args:
        r (int): rank value.
        ctrl (dict): the edge's control vertices, keyed by rank.

    Returns:
        DummyVertex: the created vertex.
    """
    dv = DummyVertex(r)
    # Dummy vertices get the layout's standard dummy dimensions.
    dv.view.w, dv.view.h = self.dw, self.dh
    self.grx[dv] = dv
    dv.ctrl = ctrl
    ctrl[r] = dv
    self.layers[r].append(dv)
    return dv
743,263
Reads from the FIFO. Reads as much data as possible from the FIFO up to the specified length. If the length argument is negative or omitted all data currently available in the FIFO will be read. If there is no data available in the FIFO an empty string is returned. Args: length: The amount of data to read from the FIFO. Defaults to -1.
def read(self, length=-1):
    """Read up to ``length`` bytes from the FIFO.

    A negative ``length`` (the default) or one at least as large as the
    buffered data drains everything currently available; an empty FIFO
    yields an empty string.
    """
    if length < 0 or length >= len(self):
        # Drain: take everything from the cursor onward, then reset.
        remainder = self.buf[self.pos:]
        self.clear()
        return remainder
    end = self.pos + length
    chunk = self.buf[self.pos:end]
    self.pos = end
    self.__discard()
    return chunk
743,528
Create a project handler Args: uri (str): schema://something formatted uri local_path (str): the project configs directory Return: ProjectHandler derived class instance
def create(self, uri, local_path):
    """Create a project handler for a ``schema://something`` uri.

    Args:
        uri (str): schema://something formatted uri.
        local_path (str): the project configs directory.

    Returns:
        A ProjectHandler-derived instance, or None when the schema is
        not recognised by ``self.schema_pattern``.
    """
    matches = self.schema_pattern.search(uri)
    if not matches:
        logger.error("Unknown uri schema: '%s'. Added schemas: %s",
                     uri, list(self.handlers.keys()))
        return None
    schema = matches.group(1)
    url = matches.group(2)
    # NOTE(review): an unregistered-but-matching schema would raise
    # KeyError here — confirm schema_pattern only matches known schemas.
    return self.handlers[schema](url, local_path)
743,612
Save the projects configs to local path Args: projects (dict): project_name -> project_data
def save(self, projects):
    """Save the projects configs to the local path.

    Args:
        projects (dict): project_name -> project_data.

    Silently returns when the configured base path does not exist.
    """
    base_path = os.path.expanduser(self.path)
    if not os.path.isdir(base_path):
        return
    logger.debug("Save projects config to %s", base_path)
    for name, data in list(projects.items()):
        project_file_path = self.get_project_config_path(name)
        with open(project_file_path, "w") as f:
            yaml.dump(data, stream = f, default_flow_style = False)
        logger.debug("Project '%s' config has been writed to '%s'",
                     name, project_file_path)
743,616
Creates a property with the given name, but the cls will created only with the first call Args: carrier: an instance of the class where want to reach the cls instance name (str): the variable name of the cls instance cls (type): the singleton object type cls_args (dict): optional dict for createing cls
def define_singleton(carrier, name, cls, cls_args=None):
    """Expose a lazily-created singleton ``cls`` instance as a property.

    The property ``name`` is installed on ``type(carrier)``; the instance
    is created with ``cls(**cls_args)`` on first access and cached on the
    carrier afterwards.

    Args:
        carrier: instance whose class receives the property.
        name (str): the attribute name under which the instance is reached.
        cls (type): the singleton object type.
        cls_args (dict): optional keyword arguments for creating cls.
    """
    # A mutable {} default would be shared across all calls; use the
    # None-sentinel idiom instead.
    if cls_args is None:
        cls_args = {}
    instance_name = "__{}".format(name)
    setattr(carrier, instance_name, None)

    def getter(self):
        # Instantiate on first access only, then reuse the cached object.
        instance = getattr(carrier, instance_name)
        if instance is None:
            instance = cls(**cls_args)
            setattr(carrier, instance_name, instance)
        return instance

    setattr(type(carrier), name, property(getter))
743,647
Get the dependencies of the Project Args: recursive (bool): add the dependant project's dependencies too Returns: dict of project name and project instances
def get_dependent_projects(self, recursive = True):
    """Collect this project's dependencies as name -> Project instances.

    Args:
        recursive (bool): also include the dependencies' own dependencies.

    Returns:
        dict: project name -> project instance; unknown dependency names
        are logged and skipped.

    NOTE(review): cyclic dependencies would recurse without bound here —
    confirm cycles are rejected elsewhere.
    """
    projects = {}
    for name, ref in list(self.dependencies.items()):
        try:
            prj = self.vcp.projects[name]
        except KeyError:
            logger.error("Unknown project '%s' in project '%s' dependencies!",
                         name, self.name)
            continue
        projects[name] = prj
        if recursive:
            projects.update(prj.get_dependent_projects())
    return projects
743,716
Save the scattering lookup tables. Save the state of the scattering lookup tables to a file. This can be loaded later with load_scatter_table. Other variables will not be saved, but this does not matter because the results of the computations are based only on the contents of the table. Args: fn: The name of the scattering table file. description (optional): A description of the table.
def save_scatter_table(self, fn, description=""):
    """Save the scattering lookup tables.

    Save the state of the scattering lookup tables to a file that can be
    loaded later with load_scatter_table.  Other attributes are not saved,
    but the computed results depend only on the table contents.

    Args:
        fn: The name of the scattering table file.
        description (optional): A description of the table.
    """
    data = {
        "description": description,
        "time": datetime.now(),
        "psd_scatter": (self.num_points, self.D_max, self._psd_D,
                        self._S_table, self._Z_table, self._angular_table,
                        self._m_table, self.geometries),
        "version": tmatrix_aux.VERSION
    }
    # The Python 2 file() builtin does not exist in Python 3, and pickle
    # requires a binary stream; a context manager also guarantees the
    # handle is closed (the original leaked it).
    with open(fn, 'wb') as f:
        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
744,551
Load the scattering lookup tables. Load the scattering lookup tables saved with save_scatter_table. Args: fn: The name of the scattering table file.
def load_scatter_table(self, fn):
    """Load the scattering lookup tables.

    Load the scattering lookup tables saved with save_scatter_table.

    Args:
        fn: The name of the scattering table file.

    Returns:
        (time, description) tuple recorded when the table was saved.
    """
    # The Python 2 file() builtin does not exist in Python 3; pickled
    # data must be read in binary mode, and the context manager closes
    # the handle (the original leaked it).
    with open(fn, 'rb') as f:
        data = pickle.load(f)
    if ("version" not in data) or (data["version"] != tmatrix_aux.VERSION):
        warnings.warn("Loading data saved with another version.", Warning)
    (self.num_points, self.D_max, self._psd_D, self._S_table,
        self._Z_table, self._angular_table, self._m_table,
        self.geometries) = data["psd_scatter"]
    return (data["time"], data["description"])
744,552
Gaussian PDF for orientation averaging. Args: std: The standard deviation in degrees of the Gaussian PDF mean: The mean in degrees of the Gaussian PDF. This should be a number in the interval [0, 180) Returns: pdf(x), a function that returns the value of the spherical Jacobian- normalized Gaussian PDF with the given STD at x (degrees). It is normalized for the interval [0, 180].
def gaussian_pdf(std=10.0, mean=0.0):
    """Gaussian PDF for orientation averaging.

    Args:
        std: standard deviation (degrees) of the Gaussian PDF.
        mean: mean (degrees) of the Gaussian PDF, in [0, 180).

    Returns:
        pdf(x): the spherical-Jacobian-normalized Gaussian PDF evaluated
        at x (degrees), normalized over the interval [0, 180].
    """
    def unnormalized(x):
        return np.exp(-0.5 * ((x - mean) / std)**2) * \
            np.sin(np.pi / 180.0 * x)

    # Scale so that the PDF integrates to one over [0, 180] degrees.
    scale = 1.0 / quad(unnormalized, 0.0, 180.0)[0]

    def pdf(x):
        return scale * unnormalized(x)

    return pdf
744,558
Compute the T-matrix using variable orientation scatterers. This method uses a very slow adaptive routine and should mainly be used for reference purposes. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance Returns: The amplitude (S) and phase (Z) matrices.
def orient_averaged_adaptive(tm):
    """Compute orientation-averaged T-matrix quantities adaptively.

    Very slow reference implementation: every element of S and Z is an
    adaptive 2-D integral over alpha (0..360) and beta (0..180),
    weighted by tm.or_pdf.  Ignores the alpha and beta attributes.

    Args:
        tm: TMatrix (or descendant) instance.

    Returns:
        (S, Z): the averaged amplitude and phase matrices.
    """
    S = np.zeros((2,2), dtype=complex)
    Z = np.zeros((4,4))

    def Sfunc(beta, alpha, i, j, real):
        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
        s = S_ang[i,j].real if real else S_ang[i,j].imag
        return s * tm.or_pdf(beta)

    ind = range(2)
    for i in ind:
        for j in ind:
            # Integrate real and imaginary parts separately; the /360
            # averages over the uniform alpha distribution.
            S.real[i,j] = dblquad(Sfunc, 0.0, 360.0, lambda x: 0.0,
                lambda x: 180.0, (i,j,True))[0]/360.0
            S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0, lambda x: 0.0,
                lambda x: 180.0, (i,j,False))[0]/360.0

    def Zfunc(beta, alpha, i, j):
        # NOTE(review): "S_and" is a misspelling of S_ang; harmless
        # since the value is unused here.
        (S_and, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
        return Z_ang[i,j] * tm.or_pdf(beta)

    ind = range(4)
    for i in ind:
        for j in ind:
            Z[i,j] = dblquad(Zfunc, 0.0, 360.0, lambda x: 0.0,
                lambda x: 180.0, (i,j))[0]/360.0

    return (S, Z)
744,560
Compute the T-matrix using variable orientation scatterers. This method uses a fast Gaussian quadrature and is suitable for most use. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance. Returns: The amplitude (S) and phase (Z) matrices.
def orient_averaged_fixed(tm):
    """Compute orientation-averaged T-matrix quantities with fixed
    quadrature: uniform alpha points and the (beta_p, beta_w) rule.
    Ignores the alpha and beta attributes.

    Args:
        tm: TMatrix (or descendant) instance.

    Returns:
        (S, Z): the averaged amplitude and phase matrices.
    """
    S_sum = np.zeros((2, 2), dtype=complex)
    Z_sum = np.zeros((4, 4))
    alphas = np.linspace(0, 360, tm.n_alpha + 1)[:-1]
    for alpha in alphas:
        for beta, weight in zip(tm.beta_p, tm.beta_w):
            S_ang, Z_ang = tm.get_SZ_single(alpha=alpha, beta=beta)
            S_sum += weight * S_ang
            Z_sum += weight * Z_ang
    # Normalize by the alpha count and the total beta quadrature weight.
    norm = (1.0 / tm.n_alpha) / tm.beta_w.sum()
    return (S_sum * norm, Z_sum * norm)
744,561
A convenience function to set the geometry variables. Args: geom: A tuple containing (thet0, thet, phi0, phi, alpha, beta). See the Scatterer class documentation for a description of these angles.
def set_geometry(self, geom):
    """Set the scattering geometry from a 6-tuple.

    Args:
        geom: (thet0, thet, phi0, phi, alpha, beta) — see the Scatterer
            class documentation for the meaning of each angle.
    """
    thet0, thet, phi0, phi, alpha, beta = geom
    self.thet0 = thet0
    self.thet = thet
    self.phi0 = phi0
    self.phi = phi
    self.alpha = alpha
    self.beta = beta
744,563
Scattering intensity (phase function) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The differential scattering cross section.
def sca_intensity(scatterer, h_pol=True):
    """Scattering intensity (phase function) for the current setup.

    Args:
        scatterer: a Scatterer instance.
        h_pol: horizontal (True, default) or vertical polarization.

    Returns:
        The differential scattering cross section.
    """
    Z = scatterer.get_Z()
    if h_pol:
        return Z[0,0] - Z[0,1]
    return Z[0,0] + Z[0,1]
744,579
Linear depolarization ratio (LDR) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), return LDR_h. If False, return LDR_v. Returns: The LDR.
def ldr(scatterer, h_pol=True):
    """Linear depolarization ratio (LDR) for the current setup.

    Args:
        scatterer: a Scatterer instance.
        h_pol: return LDR_h when True (default), LDR_v otherwise.

    Returns:
        The LDR.
    """
    Z = scatterer.get_Z()
    if h_pol:
        numerator = Z[0,0] - Z[0,1] + Z[1,0] - Z[1,1]
        denominator = Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1]
    else:
        numerator = Z[0,0] + Z[0,1] - Z[1,0] - Z[1,1]
        denominator = Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1]
    return numerator / denominator
744,580
Scattering cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The scattering cross section.
def sca_xsect(scatterer, h_pol=True):
    """Scattering cross section for the current setup, with polarization.

    Uses the PSD integrator's precomputed table when available; otherwise
    integrates the scattering intensity over the full sphere, restoring
    the scatterer geometry afterwards.

    Args:
        scatterer: a Scatterer instance.
        h_pol: horizontal (True, default) or vertical polarization.

    Returns:
        The scattering cross section.
    """
    if scatterer.psd_integrator is not None:
        return scatterer.psd_integrator.get_angular_integrated(
            scatterer.psd, scatterer.get_geometry(), "sca_xsect")

    old_geom = scatterer.get_geometry()

    def d_xsect(thet, phi):
        (scatterer.phi, scatterer.thet) = (phi*rad_to_deg, thet*rad_to_deg)
        # NOTE(review): Z is unused — sca_intensity calls get_Z itself;
        # candidate for removal.
        Z = scatterer.get_Z()
        I = sca_intensity(scatterer, h_pol)
        return I * np.sin(thet)

    try:
        xsect = dblquad(d_xsect, 0.0, 2*np.pi, lambda x: 0.0,
                        lambda x: np.pi)[0]
    finally:
        # Always restore the caller's geometry.
        scatterer.set_geometry(old_geom)

    return xsect
744,581
Extinction cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The extinction cross section.
def ext_xsect(scatterer, h_pol=True):
    """Extinction cross section for the current setup, with polarization.

    Computed from the forward-scattering amplitude matrix via the optical
    theorem; uses the PSD integrator's table when it provides one.

    Args:
        scatterer: a Scatterer instance.
        h_pol: horizontal (True, default) or vertical polarization.

    Returns:
        The extinction cross section.
    """
    if scatterer.psd_integrator is not None:
        try:
            return scatterer.psd_integrator.get_angular_integrated(
                scatterer.psd, scatterer.get_geometry(), "ext_xsect")
        except AttributeError:
            # Fall back to the usual method of computing this from S
            pass

    old_geom = scatterer.get_geometry()
    (thet0, thet, phi0, phi, alpha, beta) = old_geom
    try:
        # Force the exact forward-scattering direction.
        scatterer.set_geometry((thet0, thet0, phi0, phi0, alpha, beta))
        S = scatterer.get_S()
    finally:
        scatterer.set_geometry(old_geom)

    if h_pol:
        return 2 * scatterer.wavelength * S[1,1].imag
    else:
        return 2 * scatterer.wavelength * S[0,0].imag
744,582
Single-scattering albedo for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The single-scattering albedo.
def ssa(scatterer, h_pol=True):
    """Single-scattering albedo for the current setup, with polarization.

    Args:
        scatterer: a Scatterer instance.
        h_pol: horizontal (True, default) or vertical polarization.

    Returns:
        The single-scattering albedo (0.0 when extinction is non-positive).
    """
    extinction = ext_xsect(scatterer, h_pol=h_pol)
    if extinction > 0.0:
        return sca_xsect(scatterer, h_pol=h_pol) / extinction
    return 0.0
744,583
Asymmetry parameter for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The asymmetry parameter.
def asym(scatterer, h_pol=True):
    """Asymmetry parameter for the current setup, with polarization.

    Integrates cos(scattering angle) weighted by the phase function over
    the sphere and normalizes by the scattering cross section.  Uses the
    PSD integrator's table when available.

    Args:
        scatterer: a Scatterer instance.
        h_pol: horizontal (True, default) or vertical polarization.

    Returns:
        The asymmetry parameter.
    """
    if scatterer.psd_integrator is not None:
        return scatterer.psd_integrator.get_angular_integrated(
            scatterer.psd, scatterer.get_geometry(), "asym")

    old_geom = scatterer.get_geometry()

    # Incident direction, precomputed outside the integrand.
    cos_t0 = np.cos(scatterer.thet0 * deg_to_rad)
    sin_t0 = np.sin(scatterer.thet0 * deg_to_rad)
    p0 = scatterer.phi0 * deg_to_rad

    def integrand(thet, phi):
        (scatterer.phi, scatterer.thet) = (phi*rad_to_deg, thet*rad_to_deg)
        # cos(scattering angle) * sin(theta) Jacobian, expanded with
        # double-angle identities.
        cos_T_sin_t = 0.5 * (np.sin(2*thet)*cos_t0 + \
            (1-np.cos(2*thet))*sin_t0*np.cos(p0-phi))
        I = sca_intensity(scatterer, h_pol)
        return I * cos_T_sin_t

    try:
        cos_int = dblquad(integrand, 0.0, 2*np.pi, lambda x: 0.0,
                          lambda x: np.pi)[0]
    finally:
        # Always restore the caller's geometry.
        scatterer.set_geometry(old_geom)

    return cos_int/sca_xsect(scatterer, h_pol)
744,584
Radar cross section for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The radar cross section.
def radar_xsect(scatterer, h_pol=True):
    """Radar (backscattering) cross section for the current setup.

    Args:
        scatterer: a Scatterer instance.
        h_pol: horizontal (True, default) or vertical polarization.

    Returns:
        The radar cross section.
    """
    Z = scatterer.get_Z()
    # The cross-polar terms enter with opposite sign for H polarization.
    sign = -1.0 if h_pol else 1.0
    return 2 * np.pi * (Z[0,0] + sign * (Z[0,1] + Z[1,0]) + Z[1,1])
744,585
Reflectivity (with number concentration N=1) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The reflectivity. NOTE: To compute reflectivity in dBZ, give the particle diameter and wavelength in [mm], then take 10*log10(Zi).
def refl(scatterer, h_pol=True):
    """Radar reflectivity (number concentration N=1) for the current setup.

    Args:
        scatterer: a Scatterer instance.
        h_pol: horizontal (True, default) or vertical polarization.

    Returns:
        The reflectivity.

    NOTE: to get dBZ, give diameter and wavelength in [mm] and take
    10*log10 of the result.
    """
    prefactor = scatterer.wavelength**4 / (np.pi**5 * scatterer.Kw_sqr)
    return prefactor * radar_xsect(scatterer, h_pol)
744,586
Delta_hv for the current setup. Args: scatterer: a Scatterer instance. Returns: Delta_hv [rad].
def delta_hv(scatterer):
    """Delta_hv for the current setup.

    Args:
        scatterer: a Scatterer instance.

    Returns:
        Delta_hv [rad].
    """
    Z = scatterer.get_Z()
    y = Z[2,3] - Z[3,2]
    x = -Z[2,2] - Z[3,3]
    return np.arctan2(y, x)
744,587
Copolarized correlation (rho_hv) for the current setup. Args: scatterer: a Scatterer instance. Returns: rho_hv.
def rho_hv(scatterer):
    """Copolarized correlation coefficient (rho_hv) for the current setup.

    Args:
        scatterer: a Scatterer instance.

    Returns:
        rho_hv.
    """
    Z = scatterer.get_Z()
    numerator = (Z[2,2] + Z[3,3])**2 + (Z[3,2] - Z[2,3])**2
    denom_h = Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1]
    denom_v = Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1]
    return np.sqrt(numerator / (denom_h * denom_v))
744,588
Specific differential phase (K_dp) for the current setup. Args: scatterer: a Scatterer instance. Returns: K_dp [deg/km]. NOTE: This only returns the correct value if the particle diameter and wavelength are given in [mm]. The scatterer object should be set to forward scattering geometry before calling this function.
def Kdp(scatterer):
    """Specific differential phase (K_dp) for the current setup.

    Args:
        scatterer: a Scatterer instance, set to a forward scattering
            geometry before calling.

    Returns:
        K_dp [deg/km] — correct only when diameter and wavelength are
        given in [mm].

    Raises:
        ValueError: if the geometry is not forward scattering.
    """
    forward = (scatterer.thet0 == scatterer.thet) and \
        (scatterer.phi0 == scatterer.phi)
    if not forward:
        raise ValueError("A forward scattering geometry is needed to " + \
            "compute the specific differential phase.")

    S = scatterer.get_S()
    phase_diff = (S[1,1] - S[0,0]).real
    return 1e-3 * (180.0/np.pi) * scatterer.wavelength * phase_diff
744,589
Drop shape relationship function from Thurai2007 (http://dx.doi.org/10.1175/JTECH2051.1) paper. Arguments: D_eq: Drop volume-equivalent diameter (mm) Returns: r: The vertical-to-horizontal drop axis ratio. Note: the Scatterer class expects horizontal to vertical, so you should pass 1/dsr_thurai_2007
def dsr_thurai_2007(D_eq):
    """Drop shape relationship from Thurai et al. 2007
    (http://dx.doi.org/10.1175/JTECH2051.1).

    Args:
        D_eq: drop volume-equivalent diameter (mm).

    Returns:
        The vertical-to-horizontal drop axis ratio.  The Scatterer class
        expects horizontal-to-vertical, so pass 1/dsr_thurai_2007.
    """
    if D_eq < 0.7:
        # Small drops are effectively spherical.
        return 1.0
    if D_eq < 1.5:
        coeffs = (1.173, -0.5165, 0.4698, -0.1317, -8.5e-3)
    else:
        coeffs = (1.065, -6.25e-2, -3.99e-3, 7.66e-4, -4.095e-5)
    return sum(c * D_eq**k for k, c in enumerate(coeffs))
744,593
Constructor Arguments: these arguments match the ones of the SSLSocket class in the standard library's ssl module
def __init__(self, sock, keyfile=None, certfile=None, server_side=False,
             cert_reqs=CERT_NONE, ssl_version=PROTOCOL_DTLS, ca_certs=None,
             do_handshake_on_connect=True, suppress_ragged_eofs=True,
             ciphers=None, cb_user_config_ssl_ctx=None,
             cb_user_config_ssl=None):
    """Constructor.

    The arguments match the ones of the standard library ssl module's
    SSLSocket class, plus two optional callbacks for customizing the
    OpenSSL context (cb_user_config_ssl_ctx) and connection
    (cb_user_config_ssl).
    """
    # Validate argument combinations before touching any OpenSSL state.
    if keyfile and not certfile or certfile and not keyfile:
        raise_ssl_error(ERR_BOTH_KEY_CERT_FILES)
    if server_side and not keyfile:
        raise_ssl_error(ERR_BOTH_KEY_CERT_FILES_SVR)
    if cert_reqs != CERT_NONE and not ca_certs:
        raise_ssl_error(ERR_NO_CERTS)

    if not ciphers:
        ciphers = "DEFAULT"

    self._sock = sock
    self._keyfile = keyfile
    self._certfile = certfile
    self._cert_reqs = cert_reqs
    self._ssl_version = ssl_version
    self._ca_certs = ca_certs
    self._do_handshake_on_connect = do_handshake_on_connect
    self._suppress_ragged_eofs = suppress_ragged_eofs
    self._ciphers = ciphers
    self._handshake_done = False
    self._wbio_nb = self._rbio_nb = False
    self._user_config_ssl_ctx = cb_user_config_ssl_ctx
    self._intf_ssl_ctx = None
    self._user_config_ssl = cb_user_config_ssl
    self._intf_ssl = None

    # Pick the initialization path from the kind of socket we were given:
    # an existing SSLConnection (server copy), a previously unwrapped
    # socket (resume), or a plain datagram socket (fresh client/server).
    if isinstance(sock, SSLConnection):
        post_init = self._copy_server()
    elif isinstance(sock, _UnwrappedSocket):
        post_init = self._reconnect_unwrapped()
    else:
        try:
            peer_address = sock.getpeername()
        except socket.error:
            # Unconnected socket: the peer is unknown at this point.
            peer_address = None
        if server_side:
            post_init = self._init_server(peer_address)
        else:
            post_init = self._init_client(peer_address)

    if self._user_config_ssl:
        self._user_config_ssl(self._intf_ssl)

    # On Windows, path MTU queries are unreliable; pin a conservative MTU
    # unless the user already disabled MTU queries.
    if sys.platform.startswith('win') and \
            not (SSL_get_options(self._ssl.value) & SSL_OP_NO_QUERY_MTU):
        SSL_set_options(self._ssl.value, SSL_OP_NO_QUERY_MTU)
        DTLS_set_link_mtu(self._ssl.value, 576)

    # Hand BIO ownership to the SSL object; disown so our wrappers do not
    # free them a second time.
    SSL_set_bio(self._ssl.value, self._rbio.value, self._wbio.value)
    self._rbio.disown()
    self._wbio.disown()
    if post_init:
        post_init()
745,381
Client-side UDP connection establishment This method connects this object's underlying socket. It subsequently performs a handshake if do_handshake_on_connect was set during initialization. Arguments: peer_address - address tuple of server peer
def connect(self, peer_address):
    """Client-side UDP connection establishment.

    Connects the underlying socket to peer_address and, when
    do_handshake_on_connect was set at construction time, performs the
    DTLS handshake immediately.

    Args:
        peer_address: address tuple of the server peer.
    """
    self._sock.connect(peer_address)
    # Use the address as resolved by the socket layer, not the raw input.
    resolved_address = self._sock.getpeername()
    BIO_dgram_set_connected(self._wbio.value, resolved_address)
    assert self._wbio is self._rbio
    if self._do_handshake_on_connect:
        self.do_handshake()
745,385
Read data from connection Read up to len bytes and return them. Arguments: len -- maximum number of bytes to read Return value: string containing read bytes
def read(self, len=1024, buffer=None):
    """Read up to ``len`` bytes from the DTLS connection.

    Args:
        len: maximum number of bytes to read (note: shadows the builtin,
            kept for interface compatibility).
        buffer: optional preallocated buffer to read into.

    Returns:
        The bytes read (or the byte count when ``buffer`` is given,
        per SSL_read semantics).
    """
    try:
        return self._wrap_socket_library_call(
            lambda: SSL_read(self._ssl.value, len, buffer),
            ERR_READ_TIMEOUT)
    except openssl_error() as err:
        # Translate an ICMP port-unreachable syscall failure into a
        # dedicated SSL error; re-raise everything else.
        if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
            raise_ssl_error(ERR_PORT_UNREACHABLE, err)
        raise
745,387
Write data to connection Write data as string of bytes. Arguments: data -- buffer containing data to be written Return value: number of bytes actually transmitted
def write(self, data):
    """Write ``data`` to the DTLS connection.

    Args:
        data: buffer containing the bytes to transmit.

    Returns:
        Number of bytes actually transmitted.
    """
    try:
        ret = self._wrap_socket_library_call(
            lambda: SSL_write(self._ssl.value, data), ERR_WRITE_TIMEOUT)
    except openssl_error() as err:
        # Translate an ICMP port-unreachable syscall failure into a
        # dedicated SSL error; re-raise everything else.
        if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
            raise_ssl_error(ERR_PORT_UNREACHABLE, err)
        raise
    # A successful write implies the handshake has completed.
    if ret:
        self._handshake_done = True
    return ret
745,388
Constructor Arguments: datagram_socket -- the root socket; this must be a bound, unconnected datagram socket
def __init__(self, datagram_socket):
    """Constructor.

    Args:
        datagram_socket: the root socket; must be a bound, unconnected
            datagram socket.

    Raises:
        InvalidSocketError: if the socket is not SOCK_DGRAM, is unbound,
            or is already connected.
    """
    if datagram_socket.type != socket.SOCK_DGRAM:
        raise InvalidSocketError("datagram_socket is not of " +
                                 "type SOCK_DGRAM")
    try:
        datagram_socket.getsockname()
    except:
        raise InvalidSocketError("datagram_socket is unbound")
    try:
        datagram_socket.getpeername()
    except:
        pass
    else:
        # getpeername succeeding means the socket is already connected.
        raise InvalidSocketError("datagram_socket is connected")

    # SO_REUSEADDR lets per-peer sockets bind to the same local endpoint.
    datagram_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self._datagram_socket = datagram_socket
745,400
Create or retrieve a muxed connection Arguments: address -- a peer endpoint in IPv4/v6 address format; None refers to the connection for unknown peers Return: a bound, connected datagram socket instance, or the root socket in case address was None
def get_connection(self, address):
    """Create or retrieve a muxed connection.

    Args:
        address: a peer endpoint in IPv4/v6 address format; None refers
            to the connection for unknown peers.

    Returns:
        A bound, connected datagram socket, or the root socket when
        ``address`` is None.
    """
    if not address:
        return self._datagram_socket
    # Create a new datagram socket bound to the same interface and port as
    # the root socket, but connected to the given peer
    conn = socket.socket(self._datagram_socket.family,
                         self._datagram_socket.type,
                         self._datagram_socket.proto)
    conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    conn.bind(self._datagram_socket.getsockname())
    conn.connect(address)
    _logger.debug("Created new connection for address: %s", address)
    return conn
745,401
Constructor Arguments: datagram_socket -- the root socket; this must be a bound, unconnected datagram socket
def __init__(self, datagram_socket):
    """Constructor.

    Args:
        datagram_socket: the root socket; must be a bound, unconnected
            datagram socket.

    Raises:
        InvalidSocketError: if the socket is not SOCK_DGRAM, is unbound,
            or is already connected.
    """
    if datagram_socket.type != socket.SOCK_DGRAM:
        raise InvalidSocketError("datagram_socket is not of " +
                                 "type SOCK_DGRAM")
    try:
        datagram_socket.getsockname()
    except:
        raise InvalidSocketError("datagram_socket is unbound")
    try:
        datagram_socket.getpeername()
    except:
        pass
    else:
        # getpeername succeeding means the socket is already connected.
        raise InvalidSocketError("datagram_socket is connected")

    self.datagram_socket = datagram_socket
    self.payload = ""
    self.payload_peer_address = None
    # Weak values: a connection disappears from the map once the caller
    # drops its last reference.
    self.connections = WeakValueDictionary()
745,478
Create or retrieve a muxed connection Arguments: address -- a peer endpoint in IPv4/v6 address format; None refers to the connection for unknown peers Return: a bound, connected datagram socket instance
def get_connection(self, address):
    """Create or retrieve a muxed connection.

    Args:
        address: a peer endpoint in IPv4/v6 address format; None refers
            to the connection for unknown peers.

    Returns:
        A bound, connected datagram socket instance.
    """
    # dict.has_key was removed in Python 3; "in" is the portable
    # membership test (and works for WeakValueDictionary too).
    if address in self.connections:
        return self.connections[address]

    # We need a new datagram socket on a dynamically assigned ephemeral port
    conn = socket.socket(self._forwarding_socket.family,
                         self._forwarding_socket.type,
                         self._forwarding_socket.proto)
    conn.bind((self._forwarding_socket.getsockname()[0], 0))
    conn.connect(self._forwarding_socket.getsockname())
    if not address:
        conn.setblocking(0)
    self.connections[address] = conn
    _logger.debug("Created new connection for address: %s", address)
    return conn
745,479
Parses the metadata from a Landsat image bundle. Arguments: metadataloc: a filename or a directory. Returns metadata dictionary
def parsemeta(metadataloc):
    """Parse the metadata from a Landsat image bundle.

    Args:
        metadataloc: a filename, a directory containing a metadata file,
            or a raw metadata string (detected via 'L1_METADATA_FILE').

    Returns:
        dict: the parsed metadata.

    Raises:
        MTLParseError: when no suitable metadata source can be found.

    NOTE(review): the opened file handle is never closed; a with-block
    would be safer.
    """
    # filename or directory? if several fit, use first one and warn
    if os.path.isdir(metadataloc):
        metalist = glob.glob(os.path.join(metadataloc, METAPATTERN))
        if not metalist:
            raise MTLParseError(
                "No files matching metadata file pattern in directory %s."
                % metadataloc)
        elif len(metalist) > 0:
            metadatafn = metalist[0]
            filehandle = open(metadatafn, 'r')
            if len(metalist) > 1:
                logging.warning(
                    "More than one file in directory match metadata "
                    + "file pattern. Using %s." % metadatafn)
    elif os.path.isfile(metadataloc):
        metadatafn = metadataloc
        filehandle = open(metadatafn, 'r')
        logging.info("Using file %s." % metadatafn)
    elif 'L1_METADATA_FILE' in metadataloc:
        # The argument itself is the metadata content.
        filehandle = StringIO(metadataloc)
    else:
        raise MTLParseError(
            "File location %s is unavailable " % metadataloc
            + "or doesn't contain a suitable metadata file.")

    # Reading file line by line and inserting data into metadata dictionary
    status = 0
    metadata = {}
    grouppath = []
    dictpath = [metadata]

    for line in filehandle:
        if status == 4:
            # we reached the end in the previous iteration,
            # but are still reading lines
            logging.warning(
                "Metadata file %s appears to " % metadatafn
                + "have extra lines after the end of the metadata. "
                + "This is probably, but not necessarily, harmless.")
        status = _checkstatus(status, line)
        grouppath, dictpath = _transstat(status, grouppath, dictpath, line)
    return metadata
745,840
Add a Chapter to your epub. Args: c (Chapter): A Chapter object representing your chapter. Raises: TypeError: Raised if a Chapter object isn't supplied to this method.
def add_chapter(self, c):
    """Add a Chapter to this epub.

    Args:
        c (Chapter): the chapter to add; its images are localized and its
            xhtml is written into the OEBPS directory.

    Raises:
        TypeError: if ``c`` is not a Chapter instance.
    """
    try:
        assert type(c) == chapter.Chapter
    except AssertionError:
        raise TypeError('chapter must be of type Chapter')
    chapter_file_output = os.path.join(self.OEBPS_DIR,
                                       self.current_chapter_path)
    # Rewrite remote image references to local copies before writing.
    c._replace_images_in_chapter(self.OEBPS_DIR)
    c.write(chapter_file_output)
    self._increase_current_chapter_number()
    self.chapters.append(c)
746,130
Create an epub file from this object. Args: output_directory (str): Directory to output the epub file to epub_name (Option[str]): The file name of your epub. This should not contain .epub at the end. If this argument is not provided, defaults to the title of the epub.
def create_epub(self, output_directory, epub_name=None):
    """Create an epub file from this object.

    Args:
        output_directory (str): directory to write the epub file to.
        epub_name (Option[str]): file name of the epub, without the
            ``.epub`` extension.  Defaults to the epub's title.

    Returns:
        str: full path of the created epub file.
    """
    def createTOCs_and_ContentOPF():
        # Write the navigation documents and the OPF manifest.
        for epub_file, name in ((self.toc_html, 'toc.html'),
                                (self.toc_ncx, 'toc.ncx'),
                                (self.opf, 'content.opf'),):
            epub_file.add_chapters(self.chapters)
            epub_file.write(os.path.join(self.OEBPS_DIR, name))

    def create_zip_archive(epub_name):
        try:
            assert isinstance(epub_name, basestring) or epub_name is None
        except AssertionError:
            raise TypeError('epub_name must be string or None')
        if epub_name is None:
            epub_name = self.title
        # Keep only filesystem-safe characters in the archive name.
        epub_name = ''.join([c for c in epub_name
                             if c.isalpha() or c.isdigit() or c == ' ']).rstrip()
        epub_name_with_path = os.path.join(output_directory, epub_name)
        try:
            # BUG FIX: os.path.join(x, '.zip') built "x/.zip" instead of
            # "x.zip", so a stale archive was never removed.
            os.remove(epub_name_with_path + '.zip')
        except OSError:
            pass
        shutil.make_archive(epub_name_with_path, 'zip', self.EPUB_DIR)
        return epub_name_with_path + '.zip'

    def turn_zip_into_epub(zip_archive):
        # BUG FIX: str.strip('.zip') strips any of the characters
        # '.', 'z', 'i', 'p' from both ends and mangles names that begin
        # or end with them; slice off the extension instead.
        epub_full_name = zip_archive[:-len('.zip')] + '.epub'
        try:
            os.remove(epub_full_name)
        except OSError:
            pass
        os.rename(zip_archive, epub_full_name)
        return epub_full_name

    createTOCs_and_ContentOPF()
    epub_path = turn_zip_into_epub(create_zip_archive(epub_name))
    return epub_path
746,131
Saves an online image from image_url to image_directory with the name image_name. Returns the extension of the image saved, which is determined dynamically. Args: image_url (str): The url of the image. image_directory (str): The directory to save the image in. image_name (str): The file name to save the image as. Raises: ImageErrorException: Raised if unable to save the image at image_url
def save_image(image_url, image_directory, image_name):
    """Save an online (or local) image to image_directory as image_name.

    The file extension is determined dynamically from the url.

    Args:
        image_url (str): url or local path of the image.
        image_directory (str): directory to save the image in.
        image_name (str): file name (without extension) to save as.

    Returns:
        str: the extension of the saved image.

    Raises:
        ImageErrorException: if the image type cannot be determined or
            the image cannot be fetched/written.
    """
    image_type = get_image_type(image_url)
    if image_type is None:
        raise ImageErrorException(image_url)
    full_image_file_name = os.path.join(image_directory,
                                        image_name + '.' + image_type)

    # If the image is present on the local filesystem just copy it
    if os.path.exists(image_url):
        shutil.copy(image_url, full_image_file_name)
        return image_type

    try:
        # urllib.urlretrieve(image_url, full_image_file_name)
        with open(full_image_file_name, 'wb') as f:
            user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0'
            request_headers = {'User-Agent': user_agent}
            requests_object = requests.get(image_url, headers=request_headers)
            try:
                content = requests_object.content
                # Check for empty response
                f.write(content)
            except AttributeError:
                raise ImageErrorException(image_url)
    except IOError:
        raise ImageErrorException(image_url)
    return image_type
746,133
Writes the chapter object to an xhtml file. Args: file_name (str): The full name of the xhtml file to save to.
def write(self, file_name):
    """Write the chapter content to ``file_name`` encoded as UTF-8.

    Args:
        file_name (str): full path of the xhtml file to write.

    Raises:
        ValueError: if ``file_name`` does not end with '.xhtml'.
    """
    try:
        has_xhtml_suffix = file_name[-6:] == '.xhtml'
    except IndexError:
        has_xhtml_suffix = False
    if not has_xhtml_suffix:
        raise ValueError('filename must end with .xhtml')
    with open(file_name, 'wb') as output:
        output.write(self.content.encode('utf-8'))
746,136
Creates a full html tree from a fragment. Assumes that tag should be wrapped in a body and is currently not. Args: tag: a bs4.element.Tag Returns: bs4.element.Tag: A bs4 tag representing a full html document
def create_html_from_fragment(tag):
    """Wrap a body-less bs4 tag in a minimal html/head/body skeleton.

    Args:
        tag (bs4.element.Tag): fragment to wrap; must not already
            contain a body element.

    Returns:
        bs4.element.Tag: a full html document containing ``tag``.

    Raises:
        TypeError: if ``tag`` is not a bs4 Tag.
        ValueError: if ``tag`` already contains a body element.
    """
    try:
        assert isinstance(tag, bs4.element.Tag)
    except AssertionError:
        raise TypeError
    try:
        assert tag.find_all('body') == []
    except AssertionError:
        raise ValueError

    soup = BeautifulSoup('<html><head></head><body></body></html>',
                         'html.parser')
    soup.body.append(tag)
    return soup
746,144
Trims leadings and trailing whitespace between tags in an html document Args: input_string: A (possible unicode) string representing HTML. Returns: A (possibly unicode) string representing HTML. Raises: TypeError: Raised if input_string isn't a unicode string or string.
def condense(input_string):
    """Trim leading and trailing whitespace between tags in an HTML string.

    Args:
        input_string: a (possibly unicode) string of HTML.

    Returns:
        The HTML with inter-tag whitespace collapsed.

    Raises:
        TypeError: if input_string is not a string.
    """
    if not isinstance(input_string, basestring):
        raise TypeError
    # Drop whitespace after closing brackets, then before opening ones.
    after_tags_trimmed = re.sub('>\s+', '>', input_string).strip()
    fully_trimmed = re.sub('\s+<', '<', after_tags_trimmed).strip()
    return fully_trimmed
746,146
Converts html to xhtml Args: html_unicode_string: A (possible unicode) string representing HTML. Returns: A (possibly unicode) string representing XHTML. Raises: TypeError: Raised if input_string isn't a unicode string or string.
def html_to_xhtml(html_unicode_string):
    """Convert an HTML document string to XHTML.

    Args:
        html_unicode_string: a (possibly unicode) string of a full HTML
            document (not a fragment).

    Returns:
        A unicode XHTML string with an xmlns attribute and self-closed
        singleton tags.

    Raises:
        TypeError: if the input is not a string.
        ValueError: if the input has no html root node (i.e. a fragment).

    NOTE: uses the Python 2 ``unicode``/``basestring`` builtins, like the
    rest of this codebase.
    """
    try:
        assert isinstance(html_unicode_string, basestring)
    except AssertionError:
        raise TypeError
    root = BeautifulSoup(html_unicode_string, 'html.parser')
    # Confirm root node is html
    try:
        assert root.html is not None
    except AssertionError:
        raise ValueError(''.join(['html_unicode_string cannot be a fragment.',
                                  'string is the following: %s',
                                  unicode(root)]))
    # Add xmlns attribute to html node
    root.html['xmlns'] = 'http://www.w3.org/1999/xhtml'
    unicode_string = unicode(root.prettify(encoding='utf-8',
                                           formatter='html'),
                             encoding='utf-8')
    # Close singleton tag_dictionary
    for tag in constants.SINGLETON_TAG_LIST:
        unicode_string = unicode_string.replace(
            '<' + tag + '/>',
            '<' + tag + ' />')
    return unicode_string
746,147
Ask a user for a int input between two values args: message (str): Prompt for user low (int): Low value, user entered value must be > this value to be accepted high (int): High value, user entered value must be < this value to be accepted show_range (boolean, Default True): Print hint to user the range returns: int_in (int): Input integer
def int_input(message, low, high, show_range = True):
    """Prompt the user for an integer in [low, high], retrying until valid.

    Args:
        message (str): prompt shown to the user.
        low (int): minimum accepted value (inclusive).
        high (int): maximum accepted value (inclusive).
        show_range (bool): append the accepted range to the prompt.

    Returns:
        int: the accepted integer.
    """
    # Seed with an out-of-range value so the loop runs at least once.
    int_in = low - 1
    while (int_in < low) or (int_in > high):
        if show_range:
            suffix = ' (integer between ' + str(low) + ' and ' + str(high) + ')'
        else:
            suffix = ''
        inp = input('Enter a ' + message + suffix + ': ')
        # Accept an optional leading minus followed by digits only.
        if re.match('^-?[0-9]+$', inp) is not None:
            int_in = int(inp)
        else:
            print(colored('Must be an integer, try again!', 'red'))
    return int_in
746,537
Ask a user for a float input between two values args: message (str): Prompt for user low (float): Low value, user entered value must be > this value to be accepted high (float): High value, user entered value must be < this value to be accepted returns: float_in (int): Input float
def float_input(message, low, high):
    """Prompt the user for a float in [low, high], retrying until valid.

    Args:
        message (str): prompt shown to the user.
        low (float): minimum accepted value (inclusive).
        high (float): maximum accepted value (inclusive).

    Returns:
        float: the accepted value.

    NOTE(review): the regex does not accept negative numbers, so a
    negative ``low`` bound can never be reached — confirm intended.
    """
    # Seed with an out-of-range value so the loop runs at least once.
    float_in = low - 1.0
    while (float_in < low) or (float_in > high):
        inp = input('Enter a ' + message + ' (float between ' + str(low) +
                    ' and ' + str(high) + '): ')
        if re.match('^([0-9]*[.])?[0-9]+$', inp) is not None:
            float_in = float(inp)
        else:
            print(colored('Must be a float, try again!', 'red'))
    return float_in
746,538