code (string, lengths 75 to 104k)
docstring (string, lengths 1 to 46.9k)
def is_byte_range_valid(start, stop, length):
    """Checks if a given byte content range is valid for the given length.

    .. versionadded:: 0.7
    """
    if (start is None) != (stop is None):
        return False
    elif start is None:
        return length is None or length >= 0
    elif length is None:
        return 0 <= start < stop
    elif start >= stop:
        return False
    return 0 <= start < length
Checks if a given byte content range is valid for the given length. .. versionadded:: 0.7
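A quick usage sketch of the validity rules above (assuming the function is imported as defined):

assert is_byte_range_valid(0, 500, 1000)         # bytes 0-499 of a 1000-byte body
assert not is_byte_range_valid(500, 200, 1000)   # start must be strictly below stop
assert is_byte_range_valid(0, 500, None)         # unknown total length is acceptable
assert not is_byte_range_valid(None, 500, 1000)  # start and stop must be set together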
def _build_specs(self, specs, kwargs, fp_precision):
    """
    Returns the specs, the remaining kwargs and whether or not the
    constructor was called with kwarg or explicit specs.
    """
    if specs is None:
        overrides = param.ParamOverrides(self, kwargs,
                                         allow_extra_keywords=True)
        extra_kwargs = overrides.extra_keywords()
        kwargs = dict([(k, v) for (k, v) in kwargs.items()
                       if k not in extra_kwargs])
        rounded_specs = list(self.round_floats([extra_kwargs], fp_precision))

        if extra_kwargs == {}:
            return [], kwargs, True
        else:
            return rounded_specs, kwargs, False

    return list(self.round_floats(specs, fp_precision)), kwargs, True
Returns the specs, the remaining kwargs and whether or not the constructor was called with kwarg or explicit specs.
def process_tags(self, user, msg, reply, st=[], bst=[], depth=0, ignore_object_errors=True):
    """Post process tags in a message.

    :param str user: The user ID.
    :param str msg: The user's formatted message.
    :param str reply: The raw RiveScript reply for the message.
    :param []str st: The array of ``<star>`` matches from the trigger.
    :param []str bst: The array of ``<botstar>`` matches from a
        ``%Previous`` command.
    :param int depth: The recursion depth counter.
    :param bool ignore_object_errors: Whether to ignore errors in Python
        object macros instead of raising an ``ObjectError`` exception.

    :return str: The final reply after tags have been processed.
    """
    stars = ['']
    stars.extend(st)
    botstars = ['']
    botstars.extend(bst)
    if len(stars) == 1:
        stars.append("undefined")
    if len(botstars) == 1:
        botstars.append("undefined")

    matcher = re.findall(RE.reply_array, reply)
    for match in matcher:
        name = match
        if name in self.master._array:
            result = "{random}" + "|".join(self.master._array[name]) + "{/random}"
        else:
            result = "\x00@" + name + "\x00"
        reply = reply.replace("(@" + name + ")", result)
    reply = re.sub(RE.ph_array, r'(@\1)', reply)

    # Tag shortcuts.
    reply = reply.replace('<person>', '{person}<star>{/person}')
    reply = reply.replace('<@>', '{@<star>}')
    reply = reply.replace('<formal>', '{formal}<star>{/formal}')
    reply = reply.replace('<sentence>', '{sentence}<star>{/sentence}')
    reply = reply.replace('<uppercase>', '{uppercase}<star>{/uppercase}')
    reply = reply.replace('<lowercase>', '{lowercase}<star>{/lowercase}')

    # Weight and <star> tags.
    reply = re.sub(RE.weight, '', reply)  # Leftover {weight}s
    if len(stars) > 0:
        reply = reply.replace('<star>', text_type(stars[1]))
        reStars = re.findall(RE.star_tags, reply)
        for match in reStars:
            if int(match) < len(stars):
                reply = reply.replace('<star{match}>'.format(match=match),
                                      text_type(stars[int(match)]))
    if len(botstars) > 0:
        reply = reply.replace('<botstar>', botstars[1])
        reStars = re.findall(RE.botstars, reply)
        for match in reStars:
            if int(match) < len(botstars):
                reply = reply.replace('<botstar{match}>'.format(match=match),
                                      text_type(botstars[int(match)]))

    # <input> and <reply>
    history = self.master.get_uservar(user, "__history__")
    if type(history) is not dict:
        history = self.default_history()
    reply = reply.replace('<input>', history['input'][0])
    reply = reply.replace('<reply>', history['reply'][0])
    reInput = re.findall(RE.input_tags, reply)
    for match in reInput:
        reply = reply.replace('<input{match}>'.format(match=match),
                              history['input'][int(match) - 1])
    reReply = re.findall(RE.reply_tags, reply)
    for match in reReply:
        reply = reply.replace('<reply{match}>'.format(match=match),
                              history['reply'][int(match) - 1])

    # <id> and escape codes.
    reply = reply.replace('<id>', user)
    reply = reply.replace('\\s', ' ')
    reply = reply.replace('\\n', "\n")
    reply = reply.replace('\\#', '#')

    # Random bits.
    reRandom = re.findall(RE.random_tags, reply)
    for match in reRandom:
        output = ''
        if '|' in match:
            output = utils.random_choice(match.split('|'))
        else:
            output = utils.random_choice(match.split(' '))
        reply = reply.replace('{{random}}{match}{{/random}}'.format(match=match),
                              output, 1)  # Replace 1st match

    # Person Substitutions and String Formatting.
    for item in ['person', 'formal', 'sentence', 'uppercase', 'lowercase']:
        matcher = re.findall(r'\{' + item + r'\}(.+?)\{/' + item + r'\}', reply)
        for match in matcher:
            output = None
            if item == 'person':
                # Person substitutions.
                output = self.substitute(match, "person")
            else:
                output = utils.string_format(match, item)
            reply = reply.replace('{{{item}}}{match}{{/{item}}}'.format(item=item, match=match),
                                  output)

    # Handle all variable-related tags with an iterative regex approach,
    # to allow for nesting of tags in arbitrary ways (think <set a=<get b>>)
    # Dummy out the <call> tags first, because we don't handle them right
    # here.
    reply = reply.replace("<call>", "{__call__}")
    reply = reply.replace("</call>", "{/__call__}")
    while True:
        # This regex will match a <tag> which contains no other tag inside
        # it, i.e. in the case of <set a=<get b>> it will match <get b> but
        # not the <set> tag, on the first pass. The second pass will get the
        # <set> tag, and so on.
        match = re.search(RE.tag_search, reply)
        if not match:
            break  # No remaining tags!

        match = match.group(1)
        parts = match.split(" ", 1)
        tag = parts[0].lower()
        data = parts[1] if len(parts) > 1 else ""
        insert = ""  # Result of the tag evaluation

        # Handle the tags.
        if tag == "bot" or tag == "env":
            # <bot> and <env> tags are similar.
            target = self.master._var if tag == "bot" else self.master._global
            if "=" in data:
                # Setting a bot/env variable.
                parts = data.split("=")
                self.say("Set " + tag + " variable " + text_type(parts[0]) + "=" + text_type(parts[1]))
                target[parts[0]] = parts[1]
            else:
                # Getting a bot/env variable.
                insert = target.get(data, "undefined")
        elif tag == "set":
            # <set> user vars.
            parts = data.split("=")
            self.say("Set uservar " + text_type(parts[0]) + "=" + text_type(parts[1]))
            self.master.set_uservar(user, parts[0], parts[1])
        elif tag in ["add", "sub", "mult", "div"]:
            # Math operator tags.
            parts = data.split("=")
            var = parts[0]
            value = parts[1]
            curv = self.master.get_uservar(user, var)

            # Sanity check the value.
            try:
                value = int(value)
                if curv in [None, "undefined"]:
                    # Initialize it.
                    curv = 0
            except:
                insert = "[ERR: Math can't '{}' non-numeric value '{}']".format(tag, value)

            # Attempt the operation.
            try:
                orig = int(curv)
                new = 0
                if tag == "add":
                    new = orig + value
                elif tag == "sub":
                    new = orig - value
                elif tag == "mult":
                    new = orig * value
                elif tag == "div":
                    new = orig // value
                self.master.set_uservar(user, var, new)
            except:
                insert = "[ERR: Math couldn't '{}' to value '{}']".format(tag, curv)
        elif tag == "get":
            insert = self.master.get_uservar(user, data)
        else:
            # Unrecognized tag.
            insert = "\x00{}\x01".format(match)

        reply = reply.replace("<{}>".format(match), text_type(insert))

    # Restore unrecognized tags.
    reply = reply.replace("\x00", "<").replace("\x01", ">")

    # Streaming code. DEPRECATED!
    if '{!' in reply:
        self._warn("Use of the {!...} tag is deprecated and not supported here.")

    # Topic setter.
    reTopic = re.findall(RE.topic_tag, reply)
    for match in reTopic:
        self.say("Setting user's topic to " + match)
        self.master.set_uservar(user, "topic", match)
        reply = reply.replace('{{topic={match}}}'.format(match=match), '')

    # Inline redirecter.
    reRedir = re.findall(RE.redir_tag, reply)
    for match in reRedir:
        self.say("Redirect to " + match)
        at = match.strip()
        subreply = self._getreply(user, at, step=(depth + 1))
        reply = reply.replace('{{@{match}}}'.format(match=match), subreply)

    # Object caller.
    reply = reply.replace("{__call__}", "<call>")
    reply = reply.replace("{/__call__}", "</call>")
    reCall = re.findall(r'<call>(.+?)</call>', reply)
    for match in reCall:
        parts = re.split(RE.ws, match)
        output = ''
        obj = parts[0]
        args = []
        if len(parts) > 1:
            args = parts[1:]

        # Do we know this object?
        if obj in self.master._objlangs:
            # We do, but do we have a handler for that language?
            lang = self.master._objlangs[obj]
            if lang in self.master._handlers:
                # We do.
                try:
                    output = self.master._handlers[lang].call(self.master, obj, user, args)
                except python.PythonObjectError as e:
                    self._warn(str(e))
                    if not ignore_object_errors:
                        raise ObjectError(str(e))
                    output = RS_ERR_OBJECT
            else:
                if not ignore_object_errors:
                    raise ObjectError(RS_ERR_OBJECT_HANDLER)
                output = RS_ERR_OBJECT_HANDLER
        else:
            if not ignore_object_errors:
                raise ObjectError(RS_ERR_OBJECT_MISSING)
            output = RS_ERR_OBJECT_MISSING
        reply = reply.replace('<call>{match}</call>'.format(match=match), output)

    return reply
Post process tags in a message. :param str user: The user ID. :param str msg: The user's formatted message. :param str reply: The raw RiveScript reply for the message. :param []str st: The array of ``<star>`` matches from the trigger. :param []str bst: The array of ``<botstar>`` matches from a ``%Previous`` command. :param int depth: The recursion depth counter. :param bool ignore_object_errors: Whether to ignore errors in Python object macros instead of raising an ``ObjectError`` exception. :return str: The final reply after tags have been processed.
def run(self, module, post_check):
    '''
    Execute the configured source code in a module and run any post
    checks.

    Args:
        module (Module) : a module to execute the configured code in.

        post_check(callable) : a function that can raise an exception
            if expected post-conditions are not met after code execution.
    '''
    try:
        # Simulate the sys.path behaviour described here:
        #
        # https://docs.python.org/2/library/sys.html#sys.path
        _cwd = os.getcwd()
        _sys_path = list(sys.path)
        _sys_argv = list(sys.argv)
        sys.path.insert(0, os.path.dirname(self._path))
        sys.argv = [os.path.basename(self._path)] + self._argv

        exec(self._code, module.__dict__)
        post_check()
    except Exception as e:
        self._failed = True
        self._error_detail = traceback.format_exc()
        _exc_type, _exc_value, exc_traceback = sys.exc_info()
        filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]
        self._error = "%s\nFile \"%s\", line %d, in %s:\n%s" % (
            str(e), os.path.basename(filename), line_number, func, txt)
    finally:
        # undo sys.path, CWD fixups
        os.chdir(_cwd)
        sys.path = _sys_path
        sys.argv = _sys_argv

    self.ran = True
Execute the configured source code in a module and run any post checks. Args: module (Module) : a module to execute the configured code in. post_check(callable) : a function that can raise an exception if expected post-conditions are not met after code execution.
def merge_extras(items, config):
    """Merge extra disambiguated reads into a final BAM file.
    """
    final = {}
    for extra_name in items[0]["disambiguate"].keys():
        in_files = []
        for data in items:
            in_files.append(data["disambiguate"][extra_name])
        out_file = "%s-allmerged%s" % os.path.splitext(in_files[0])
        if in_files[0].endswith(".bam"):
            merged_file = merge.merge_bam_files(in_files, os.path.dirname(out_file),
                                                items[0], out_file=out_file)
        else:
            assert extra_name == "summary", extra_name
            merged_file = _merge_summary(in_files, out_file, items[0])
        final[extra_name] = merged_file
    out = []
    for data in items:
        data["disambiguate"] = final
        out.append([data])
    return out
Merge extra disambiguated reads into a final BAM file.
def _get_raw_xsrf_token(self) -> Tuple[Optional[int], bytes, float]:
    """Read or generate the xsrf token in its raw form.

    The raw_xsrf_token is a tuple containing:

    * version: the version of the cookie from which this token was read,
      or None if we generated a new token in this request.
    * token: the raw token data; random (non-ascii) bytes.
    * timestamp: the time this token was generated (will not be accurate
      for version 1 cookies)
    """
    if not hasattr(self, "_raw_xsrf_token"):
        cookie = self.get_cookie("_xsrf")
        if cookie:
            version, token, timestamp = self._decode_xsrf_token(cookie)
        else:
            version, token, timestamp = None, None, None
        if token is None:
            version = None
            token = os.urandom(16)
            timestamp = time.time()
        assert token is not None
        assert timestamp is not None
        self._raw_xsrf_token = (version, token, timestamp)
    return self._raw_xsrf_token
Read or generate the xsrf token in its raw form. The raw_xsrf_token is a tuple containing: * version: the version of the cookie from which this token was read, or None if we generated a new token in this request. * token: the raw token data; random (non-ascii) bytes. * timestamp: the time this token was generated (will not be accurate for version 1 cookies)
def group_add(self, name='Ungrouped'):
    """
    Dynamically add a group instance to the system if it does not exist.

    Parameters
    ----------
    name : str, optional ('Ungrouped' as default)
        Name of the group

    Returns
    -------
    None
    """
    if not hasattr(self, name):
        self.__dict__[name] = Group(self, name)
        self.loaded_groups.append(name)
Dynamically add a group instance to the system if it does not exist. Parameters ---------- name : str, optional ('Ungrouped' as default) Name of the group Returns ------- None
def to_json(data, filename='data.json', indent=4):
    """
    Write an object to a json file

    :param data: The object
    :param filename: The name of the file
    :param indent: The indentation of the file
    :return: None
    """
    with open(filename, 'w') as f:
        f.write(json.dumps(data, indent=indent))
Write an object to a json file :param data: The object :param filename: The name of the file :param indent: The indentation of the file :return: None
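A minimal usage sketch (the filename is a placeholder; assumes json is imported in the module that defines to_json):

to_json({'name': 'alice', 'scores': [1, 2, 3]}, filename='out.json', indent=2)
# out.json now holds the pretty-printed JSON document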
def send_once(remote, codes, count=None, device=None, address=None):
    """
    All parameters are passed to irsend. See the man page for irsend
    for details about their usage.

    Parameters
    ----------
    remote: str
    codes: [str]
    count: int
    device: str
    address: str

    Notes
    -----
    No attempt is made to catch or handle errors. See the documentation
    for subprocess.check_output to see the types of exceptions it may raise.
    """
    args = ['send_once', remote] + codes
    _call(args, count, device, address)
All parameters are passed to irsend. See the man page for irsend for details about their usage. Parameters ---------- remote: str codes: [str] count: int device: str address: str Notes ----- No attempt is made to catch or handle errors. See the documentation for subprocess.check_output to see the types of exceptions it may raise.
def POST(self):
    """ The HTTP POST body parsed into a MultiDict.

        This supports urlencoded and multipart POST requests. Multipart
        is commonly used for file uploads and may result in some of the
        values being cgi.FieldStorage objects instead of strings.

        Multiple values per key are possible. See MultiDict for details.
    """
    if self._POST is None:
        save_env = dict()  # Build a save environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ:
                save_env[key] = self.environ[key]
        save_env['QUERY_STRING'] = ''  # Without this, sys.argv is called!
        if TextIOWrapper:
            fb = TextIOWrapper(self.body, encoding='ISO-8859-1')
        else:
            fb = self.body
        data = cgi.FieldStorage(fp=fb, environ=save_env)
        self._POST = MultiDict()
        for item in data.list:
            self._POST[item.name] = item if item.filename else item.value
    return self._POST
The HTTP POST body parsed into a MultiDict. This supports urlencoded and multipart POST requests. Multipart is commonly used for file uploads and may result in some of the values being cgi.FieldStorage objects instead of strings. Multiple values per key are possible. See MultiDict for details.
def crossdomain(f):
    """This decorator sets the rules for the crossdomain request per
    http method. The settings are taken from the actual resource itself,
    and returned as per the CORS spec. All CORS requests are rejected if
    the resource's `allow_methods` doesn't include the 'OPTIONS' method.
    """
    @wraps(f)
    def decorator(self, *args, **kwargs):
        # TODO: if a non-cors request has the origin header, this will fail
        if not self.cors_enabled and 'origin' in request.headers:
            return self._make_response(405, "CORS request rejected")

        resp = f(self, *args, **kwargs)
        h = resp.headers
        current_app.logger.debug("Request Headers: {}".format(request.headers))

        allowed_methods = self.cors_config['methods'] + ["OPTIONS"]
        h['Access-Control-Allow-Methods'] = ", ".join(allowed_methods)
        h['Access-Control-Max-Age'] = self.cors_config.get('max_age', 21600)

        # Request Origin checks
        hostname = urlparse(request.headers['origin']).netloc \
            if 'origin' in request.headers else request.headers['host']
        if hostname in self.cors_config.get('blacklist', []):
            return self._make_response(405, "CORS request blacklisted")
        if self.cors_config.get('allowed', None) is not None and \
                hostname not in self.cors_config.get('allowed', None):
            return self._make_response(405, "CORS request refused")
        if 'origin' in request.headers:
            h['Access-Control-Allow-Origin'] = request.headers['origin']

        # Request header checks
        if 'access-control-request-headers' in request.headers:
            if self.cors_config.get('headers', None) is None:
                allowed_headers = \
                    request.headers.get('access-control-request-headers', "*")
            else:
                allowed_headers = []
                for k in request.headers.get(
                        'access-control-request-headers', []):
                    if k in self.cors_config.get('headers', []):
                        allowed_headers.append(k)
                allowed_headers = ", ".join(allowed_headers)
            h['Access-Control-Allow-Headers'] = allowed_headers
        return resp
    return decorator
This decorator sets the rules for the crossdomain request per http method. The settings are taken from the actual resource itself, and returned as per the CORS spec. All CORS requests are rejected if the resource's `allow_methods` doesn't include the 'OPTIONS' method.
def _expand_variable_match(positional_vars, named_vars, match):
    """Expand a matched variable with its value.

    Args:
        positional_vars (list): A list of positional variables. This list
            will be modified.
        named_vars (dict): A dictionary of named variables.
        match (re.Match): A regular expression match.

    Returns:
        str: The expanded variable to replace the match.

    Raises:
        ValueError: If a positional or named variable is required by the
            template but not specified or if an unexpected template
            expression is encountered.
    """
    positional = match.group("positional")
    name = match.group("name")
    if name is not None:
        try:
            return six.text_type(named_vars[name])
        except KeyError:
            raise ValueError(
                "Named variable '{}' not specified and needed by template "
                "`{}` at position {}".format(name, match.string, match.start())
            )
    elif positional is not None:
        try:
            return six.text_type(positional_vars.pop(0))
        except IndexError:
            raise ValueError(
                "Positional variable not specified and needed by template "
                "`{}` at position {}".format(match.string, match.start())
            )
    else:
        raise ValueError("Unknown template expression {}".format(match.group(0)))
Expand a matched variable with its value. Args: positional_vars (list): A list of positional variables. This list will be modified. named_vars (dict): A dictionary of named variables. match (re.Match): A regular expression match. Returns: str: The expanded variable to replace the match. Raises: ValueError: If a positional or named variable is required by the template but not specified or if an unexpected template expression is encountered.
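A usage sketch of how such an expander plugs into re.sub. The pattern below is a simplified stand-in (an assumption, not the library's actual template grammar): '*' marks a positional variable and '{name}' a named one.

import functools
import re

pattern = re.compile(r"(?P<positional>\*)|{(?P<name>\w+)}")
# Bind the variable sources; re.sub supplies the match argument per hit.
expand = functools.partial(_expand_variable_match, ['v1'], {'project': 'demo'})
print(pattern.sub(expand, "/{project}/items/*"))  # -> /demo/items/v1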
def hash_str(data, hasher=None):
    """Checksum hash a string."""
    hasher = hasher or hashlib.sha1()
    hasher.update(data)
    return hasher
Checksum hash a string.
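A minimal usage sketch. Note that hashlib requires bytes on Python 3, and the function returns the hasher object rather than a digest:

import hashlib

digest = hash_str(b"hello world").hexdigest()
# Passing an existing hasher allows incremental hashing:
chained = hash_str(b" again", hasher=hash_str(b"hello")).hexdigest()  # sha1 of b"hello again"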
def assign_objective_requisite(self, objective_id=None, requisite_objective_id=None):
    """Creates a requirement dependency between two Objectives.

    arg:    objective_id (osid.id.Id): the Id of the dependent Objective
    arg:    requisite_objective_id (osid.id.Id): the Id of the required Objective
    raise:  AlreadyExists - objective_id already mapped to requisite_objective_id
    raise:  NotFound - objective_id or requisite_objective_id not found
    raise:  NullArgument - objective_id or requisite_objective_id is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    if objective_id is None or requisite_objective_id is None:
        raise NullArgument()
    ors = ObjectiveRequisiteSession(self._objective_bank_id, runtime=self._runtime)
    ids_arg = {'ids': []}
    for objective in ors.get_requisite_objectives(objective_id):
        if objective.get_id() == requisite_objective_id:
            raise AlreadyExists()
        ids_arg['ids'].append(str(objective.get_id()))
    ids_arg['ids'].append(str(requisite_objective_id))
    url_path = construct_url('requisiteids',
                             bank_id=self._catalog_idstr,
                             obj_id=objective_id)
    try:
        result = self._put_request(url_path, ids_arg)
    except Exception:
        raise
    id_list = list()
    for identifier in result['ids']:
        id_list.append(Id(idstr=identifier))
    return id_objects.IdList(id_list)
Creates a requirement dependency between two Objectives. arg: objective_id (osid.id.Id): the Id of the dependent Objective arg: requisite_objective_id (osid.id.Id): the Id of the required Objective raise: AlreadyExists - objective_id already mapped to requisite_objective_id raise: NotFound - objective_id or requisite_objective_id not found raise: NullArgument - objective_id or requisite_objective_id is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method must be implemented.
def sunion(self, keys, *args):
    """Emulate sunion."""
    func = lambda left, right: left.union(right)
    return self._apply_to_sets(func, "SUNION", keys, *args)
Emulate sunion.
def verify_submit(self, job_ids, timeout, delay, **kwargs):
    """Verifies that the results were successfully submitted."""
    if self.skip:
        return False
    jobs = self.wait_for_jobs(job_ids, timeout, delay)
    self.get_logs(jobs, log_file=kwargs.get("log_file"))
    return self._check_outcome(jobs)
Verifies that the results were successfully submitted.
def key_size(self):
    """
    *new in 0.4.1*

    The size pertaining to this key. ``int`` for non-EC key algorithms;
    :py:obj:`constants.EllipticCurveOID` for EC keys.
    """
    if self.key_algorithm in {PubKeyAlgorithm.ECDSA, PubKeyAlgorithm.ECDH}:
        return self._key.keymaterial.oid
    return next(iter(self._key.keymaterial)).bit_length()
*new in 0.4.1* The size pertaining to this key. ``int`` for non-EC key algorithms; :py:obj:`constants.EllipticCurveOID` for EC keys.
def run_toy_DistilledSGLD(gpu_id):
    """Run DistilledSGLD on toy dataset"""
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    teacher_noise_precision = 1.0
    teacher_net = get_toy_sym(True, teacher_noise_precision)
    student_net = get_toy_sym(False)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id))}
    teacher_initializer = mx.init.Uniform(0.07)
    student_initializer = mx.init.Uniform(0.07)
    student_grad_f = lambda student_outputs, teacher_pred: \
        regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
                      teacher_initializer=teacher_initializer,
                      student_initializer=student_initializer,
                      teacher_learning_rate=1E-4, student_learning_rate=0.01,
                      # teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
                      student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
                      student_grad_f=student_grad_f,
                      teacher_prior_precision=0.1, student_prior_precision=0.001,
                      perturb_deviation=0.1, minibatch_size=minibatch_size,
                      task='regression',
                      dev=dev(gpu_id))
Run DistilledSGLD on toy dataset
def input_fields(self, preamble, *args):
    """Get a set of fields from the user. Optionally a preamble may be
    shown to the user describing the fields to return.

    The fields are specified as the remaining arguments with each field
    being a list with the following entries:

    - a programmer-visible name for the field
    - a string prompt to show to the user
    - one of the following values:
      - string: return a string from the user
      - password: return a string from the user but do not echo the
        input to the screen
      - boolean: return a boolean value from the user
      - integer: return an integer value from the user
    - the default value (optional)

    Fields are requested from the user in the order specified.

    Fields are returned in a dictionary with the field names being the
    keys and the values being the items.
    """
    self.new_section()
    if preamble is not None:
        self.message(preamble)
    if any([True for x in args if len(x) > 3]):
        self.message("""
Some questions have default answers which can be selected by
pressing 'Enter' at the prompt.""")

    output_dict = {}
    for field in args:
        (field_name, prompt, field_type) = field[:3]
        default = None
        if len(field) > 3:
            default = field[3]
        if field_type == 'string':
            output_dict[field_name] = self.input(prompt, default=default)
        elif field_type == 'password':
            output_dict[field_name] = self.input(prompt, no_echo=True)
        elif field_type == 'boolean':
            output_dict[field_name] = self.input_boolean(prompt, default=default)
        elif field_type == 'integer':
            output_dict[field_name] = self.input_integer(prompt, default=default)
    return output_dict
Get a set of fields from the user. Optionally a preamble may be shown to the user describing the fields to return. The fields are specified as the remaining arguments with each field being a list with the following entries: - a programmer-visible name for the field - a string prompt to show to the user - one of the following values: - string: return a string from the user - password: return a string from the user but do not echo the input to the screen - boolean: return a boolean value from the user - integer: return an integer value from the user - the default value (optional) Fields are requested from the user in the order specified. Fields are returned in a dictionary with the field names being the keys and the values being the items.
def _extract_authors(pub, idx, _root):
    """
    Create a concatenated string of author names. Separate names with
    semi-colons.

    :param any pub: Publication author structure is ambiguous
    :param int idx: Index number of Pub
    """
    logger_ts.info("enter extract_authors")
    try:
        # DOI Author data. We'd prefer to have this first.
        names = pub['author']
    except KeyError as e:
        try:
            # Manually entered author data. This is second best.
            names = pub['authors']
        except KeyError as e:
            # Couldn't find any author data. Skip it altogether.
            names = False
            logger_ts.info("extract_authors: KeyError: author data not provided, {}".format(e))

    # If there is author data, find out what type it is
    if names:
        # Build author names onto empty string
        auth = ''
        # Is it a list of dicts or a list of strings? Could be either
        # Authors: Stored as a list of dictionaries or list of strings
        if isinstance(names, list):
            for name in names:
                if isinstance(name, str):
                    auth += name + ';'
                elif isinstance(name, dict):
                    for k, v in name.items():
                        auth += v + ';'
        elif isinstance(names, str):
            auth = names
        # Enter finished author string into target
        _root['pub' + str(idx + 1) + '_author'] = auth[:-1]
    return _root
Create a concatenated string of author names. Separate names with semi-colons. :param any pub: Publication author structure is ambiguous :param int idx: Index number of Pub
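A minimal usage sketch (assumes the module-level logger_ts used by _extract_authors is configured):

pub = {'author': [{'name': 'C. Darwin'}, {'name': 'A. Wallace'}]}
root = _extract_authors(pub, 0, {})
# root == {'pub1_author': 'C. Darwin;A. Wallace'}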
def create_queue_wrapper(name, queue_size, fed_arrays, data_sources, *args, **kwargs):
    """
    Arguments
        name: string
            Name of the queue
        queue_size: integer
            Size of the queue
        fed_arrays: list
            array names that will be fed by this queue
        data_sources: dict
            (lambda/method, dtype) tuples, keyed on array names
    """
    qtype = SingleInputMultiQueueWrapper if 'count' in kwargs else QueueWrapper
    return qtype(name, queue_size, fed_arrays, data_sources, *args, **kwargs)
Arguments name: string Name of the queue queue_size: integer Size of the queue fed_arrays: list array names that will be fed by this queue data_sources: dict (lambda/method, dtype) tuples, keyed on array names
def read_playlists(self):
    self.playlists = []
    self.selected_playlist = -1
    files = glob.glob(path.join(self.stations_dir, '*.csv'))
    if len(files) == 0:
        return 0, -1
    else:
        for a_file in files:
            a_file_name = ''.join(path.basename(a_file).split('.')[:-1])
            a_file_size = self._bytes_to_human(path.getsize(a_file))
            a_file_time = ctime(path.getmtime(a_file))
            self.playlists.append([a_file_name, a_file_time, a_file_size, a_file])
    self.playlists.sort()
    """ get already loaded playlist id """
    for i, a_playlist in enumerate(self.playlists):
        if a_playlist[-1] == self.stations_file:
            self.selected_playlist = i
            break
    return len(self.playlists), self.selected_playlist
get already loaded playlist id
def negative_directional_index(close_data, high_data, low_data, period):
    """
    Negative Directional Index (-DI).

    Formula:
    -DI = 100 * SMMA(-DM) / ATR
    """
    catch_errors.check_for_input_len_diff(close_data, high_data, low_data)
    ndi = (100 *
           smma(negative_directional_movement(high_data, low_data), period) /
           atr(close_data, period)
           )
    return ndi
Negative Directional Index (-DI). Formula: -DI = 100 * SMMA(-DM) / ATR
def exec_container_commands(self, action, c_name, **kwargs):
    """
    Runs all configured commands of a container configuration inside the
    container instance.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param c_name: Container name.
    :type c_name: unicode | str
    :return: List of exec command return values (e.g. containing the command
      id), if applicable, or ``None`` if either no commands have been run or
      no values have been returned from the API.
    :rtype: list[dict] | NoneType
    """
    config_cmds = action.config.exec_commands
    if not config_cmds:
        return None
    return self.exec_commands(action, c_name, run_cmds=config_cmds)
Runs all configured commands of a container configuration inside the container instance. :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param c_name: Container name. :type c_name: unicode | str :return: List of exec command return values (e.g. containing the command id), if applicable, or ``None`` if either no commands have been run or no values have been returned from the API. :rtype: list[dict] | NoneType
def get_composite_keywords(ckw_db, fulltext, skw_spans):
    """Return a list of composite keywords bound with number of occurrences.

    :param ckw_db: list of KeywordToken objects
        (they are supposed to be composite ones)
    :param fulltext: string to search in
    :param skw_spans: dictionary of already identified single keywords

    :return : dictionary of matches in a format {
            <keyword object>, [[position, position...], [info_about_matches] ],
            ..
        }
    """
    timer_start = time.clock()

    # Build the list of composite candidates
    ckw_out = {}
    skw_as_components = []
    for composite_keyword in ckw_db.values():
        # Counters for the composite keyword. First count is for the
        # number of occurrences in the whole document and second count
        # is for the human defined keywords.
        ckw_count = 0
        matched_spans = []

        # First search in the fulltext using the regex pattern of the whole
        # composite keyword (including the alternative labels)
        for regex in composite_keyword.regex:
            for match in regex.finditer(fulltext):
                span = list(match.span())
                span[1] -= 1
                span = tuple(span)
                if span not in matched_spans:
                    ckw_count += 1
                    matched_spans.append(span)

        # Get the single keywords locations.
        try:
            components = composite_keyword.compositeof
        except AttributeError:
            current_app.logger.error(
                "Cached ontology is corrupted. Please "
                "remove the cached ontology in your temporary file."
            )
            raise OntologyError('Cached ontology is corrupted.')

        spans = []
        try:
            spans = [skw_spans[component][0] for component in components]
        except KeyError:
            # Some of the keyword components are not to be found in the text.
            # Therefore we cannot continue because the match is incomplete.
            pass

        ckw_spans = []
        for index in range(len(spans) - 1):
            len_ckw = len(ckw_spans)
            if ckw_spans:  # cause ckw_spans include the previous
                previous_spans = ckw_spans
            else:
                previous_spans = spans[index]

            for new_span in [(span0, span1) for span0 in previous_spans
                             for span1 in spans[index + 1]]:
                span = _get_ckw_span(fulltext, new_span)
                if span is not None:
                    ckw_spans.append(span)

            # the spans must be overlapping to be included
            if index > 0 and ckw_spans:
                _ckw_spans = []
                for _span in ckw_spans[len_ckw:]:  # new spans
                    for _span2 in ckw_spans[:len_ckw]:
                        s = _span_overlapping(_span, _span2)
                        if s:
                            _ckw_spans.append(s)
                ckw_spans = _ckw_spans

        for matched_span in [mspan for mspan in ckw_spans
                             if mspan not in matched_spans]:
            ckw_count += 1
            matched_spans.append(matched_span)

        if ckw_count:
            # Gather the component counts.
            component_counts = []
            for component in components:
                skw_as_components.append(component)
                # Get the single keyword count.
                try:
                    component_counts.append(len(skw_spans[component][0]))
                except KeyError:
                    component_counts.append(0)

            # Store the composite keyword
            ckw_out[composite_keyword] = [matched_spans, component_counts]

    # Remove the single keywords that appear as components from the list
    # of single keywords.
    for skw in skw_as_components:
        try:
            del skw_spans[skw]
        except KeyError:
            pass

    # Remove the composite keywords that are fully present in
    # longer composite keywords
    _ckw_base = filter(lambda x: len(x.compositeof) == 2, ckw_out.keys())
    _ckw_extended = sorted(
        filter(lambda x: len(x.compositeof) > 2, ckw_out.keys()),
        key=lambda x: len(x.compositeof))
    if _ckw_extended:
        candidates = []
        for kw1 in _ckw_base:
            s1 = set(kw1.compositeof)
            for kw2 in _ckw_extended:
                s2 = set(kw2.compositeof)
                if s1.issubset(s2):
                    candidates.append((kw1, kw2))
                    # break  # don't stop because this keyword may be
                    # partly contained by kw_x and kw_y
        for i in range(len(_ckw_extended)):
            kw1 = _ckw_extended[i]
            s1 = set(kw1.compositeof)
            for ii in range(i + 1, len(_ckw_extended)):
                kw2 = _ckw_extended[ii]
                s2 = set(kw2.compositeof)
                if s1.issubset(s2):
                    candidates.append((kw1, kw2))
                    break

        if candidates:
            for kw1, kw2 in candidates:
                try:
                    match1 = ckw_out[kw1]  # subset of the kw2
                    match2 = ckw_out[kw2]
                except KeyError:
                    continue
                positions1 = match1[0]
                for pos1 in positions1:
                    for pos2 in match2[0]:
                        if _span_overlapping(pos1, pos2):
                            del positions1[positions1.index(pos1)]
                            # if we removed all the matches also
                            # delete the keyword
                            if len(positions1) == 0:
                                del ckw_out[kw1]
                            break

    current_app.logger.info(
        "Matching composite keywords... %d keywords found "
        "in %.1f sec." % (len(ckw_out), time.clock() - timer_start),
    )

    return ckw_out
Return a list of composite keywords bound with number of occurrences. :param ckw_db: list of KeywordToken objects (they are supposed to be composite ones) :param fulltext: string to search in :param skw_spans: dictionary of already identified single keywords :return : dictionary of matches in a format { <keyword object>, [[position, position...], [info_about_matches] ], .. }
def make_prototype_request(*args, **kwargs):
    """Make a prototype Request for a Matcher."""
    if args and inspect.isclass(args[0]) and issubclass(args[0], Request):
        request_cls, arg_list = args[0], args[1:]
        return request_cls(*arg_list, **kwargs)
    if args and isinstance(args[0], Request):
        if args[1:] or kwargs:
            raise_args_err("can't interpret args")
        return args[0]

    # Match any opcode.
    return Request(*args, **kwargs)
Make a prototype Request for a Matcher.
def get_autype_list(self, code_list):
    """
    Get the ex-rights (price adjustment) factors for the given list of stocks.

    :param code_list: list of stock codes, e.g. ['HK.00700']
    :return: (ret, data)

            ret == RET_OK: data is a pd.DataFrame with the columns below

            ret != RET_OK: data is an error string

            =====================   ===========   =================================================================================
            Field                   Type          Description
            =====================   ===========   =================================================================================
            code                    str           stock code
            ex_div_date             str           ex-dividend / ex-rights date
            split_ratio             float         split/merge ratio (ratio field, not shown as %); e.g. 1/5 for a 5-into-1 merge, 5/1 for a 1-into-5 split
            per_cash_div            float         cash dividend per share
            per_share_div_ratio     float         bonus-share ratio per share (ratio field, not shown as %)
            per_share_trans_ratio   float         capitalization-issue ratio per share (ratio field, not shown as %)
            allotment_ratio         float         rights-issue ratio per share (ratio field, not shown as %)
            allotment_price         float         rights-issue price
            stk_spo_ratio           float         secondary-offering ratio (ratio field, not shown as %)
            stk_spo_price           float         secondary-offering price
            forward_adj_factorA     float         forward adjustment factor A
            forward_adj_factorB     float         forward adjustment factor B
            backward_adj_factorA    float         backward adjustment factor A
            backward_adj_factorB    float         backward adjustment factor B
            =====================   ===========   =================================================================================
    """
    code_list = unique_and_normalize_list(code_list)
    for code in code_list:
        if code is None or is_str(code) is False:
            error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
            return RET_ERROR, error_str

    query_processor = self._get_sync_query_processor(
        ExrightQuery.pack_req, ExrightQuery.unpack_rsp)
    kargs = {
        "stock_list": code_list,
        "conn_id": self.get_sync_conn_id()
    }
    ret_code, msg, exr_record = query_processor(**kargs)
    if ret_code == RET_ERROR:
        return ret_code, msg

    col_list = [
        'code', 'ex_div_date', 'split_ratio', 'per_cash_div',
        'per_share_div_ratio', 'per_share_trans_ratio', 'allotment_ratio',
        'allotment_price', 'stk_spo_ratio', 'stk_spo_price',
        'forward_adj_factorA', 'forward_adj_factorB',
        'backward_adj_factorA', 'backward_adj_factorB'
    ]

    exr_frame_table = pd.DataFrame(exr_record, columns=col_list)

    return RET_OK, exr_frame_table
Get the ex-rights (price adjustment) factors for the given list of stocks.

:param code_list: list of stock codes, e.g. ['HK.00700']
:return: (ret, data)

        ret == RET_OK: data is a pd.DataFrame with the columns below

        ret != RET_OK: data is an error string

        =====================   ===========   =================================================================================
        Field                   Type          Description
        =====================   ===========   =================================================================================
        code                    str           stock code
        ex_div_date             str           ex-dividend / ex-rights date
        split_ratio             float         split/merge ratio (ratio field, not shown as %); e.g. 1/5 for a 5-into-1 merge, 5/1 for a 1-into-5 split
        per_cash_div            float         cash dividend per share
        per_share_div_ratio     float         bonus-share ratio per share (ratio field, not shown as %)
        per_share_trans_ratio   float         capitalization-issue ratio per share (ratio field, not shown as %)
        allotment_ratio         float         rights-issue ratio per share (ratio field, not shown as %)
        allotment_price         float         rights-issue price
        stk_spo_ratio           float         secondary-offering ratio (ratio field, not shown as %)
        stk_spo_price           float         secondary-offering price
        forward_adj_factorA     float         forward adjustment factor A
        forward_adj_factorB     float         forward adjustment factor B
        backward_adj_factorA    float         backward adjustment factor A
        backward_adj_factorB    float         backward adjustment factor B
        =====================   ===========   =================================================================================
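A minimal usage sketch, assuming this is the futu-api OpenQuoteContext method; host and port are placeholders for a running OpenD gateway:

from futu import OpenQuoteContext, RET_OK

quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
ret, data = quote_ctx.get_autype_list(['HK.00700'])
if ret == RET_OK:
    print(data[['code', 'ex_div_date', 'forward_adj_factorA']])
quote_ctx.close()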
def registerDirectory(self, name, physicalPath, directoryType, cleanupMode,
                      maxFileAge, description):
    """
    Registers a new server directory. While registering the server
    directory, you can also specify the directory's cleanup parameters.
    You can also register a directory by using its JSON representation
    as a value of the directory parameter.

    Inputs:
        name - The name of the server directory.
        physicalPath - The absolute physical path of the server directory.
        directoryType - The type of server directory.
        cleanupMode - Defines if files in the server directory need to be
            cleaned up. Default: NONE
        maxFileAge - Defines how long a file in the directory needs to be
            kept before it is deleted (in minutes).
        description - An optional description for the server directory.
    """
    url = self._url + "/directories/register"
    params = {
        "f": "json",
        "name": name,
        "physicalPath": physicalPath,
        "directoryType": directoryType,
        "cleanupMode": cleanupMode,
        "maxFileAge": maxFileAge,
        "description": description
    }
    res = self._post(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
    return res
Registers a new server directory. While registering the server directory, you can also specify the directory's cleanup parameters. You can also register a directory by using its JSON representation as a value of the directory parameter. Inputs: name - The name of the server directory. physicalPath - The absolute physical path of the server directory. directoryType - The type of server directory. cleanupMode - Defines if files in the server directory need to be cleaned up. Default: NONE maxFileAge - Defines how long a file in the directory needs to be kept before it is deleted (in minutes). description - An optional description for the server directory.
def is_git_directory_clean(path_to_repo: Path,
                           search_parent_dirs: bool = True,
                           check_untracked: bool = False) -> None:
    """
    Check that the git working directory is in a clean state and raise
    exceptions if not.

    :path_to_repo: The path of the git repo
    """
    repo = Repo(str(path_to_repo), search_parent_directories=search_parent_dirs)
    logger.debug("is_git_directory_clean check for repo in path={} from "
                 "cwd={} with search_parent_directories={}".format(
                     path_to_repo, os.getcwd(), search_parent_dirs))

    # If there are changes to already tracked files
    if repo.is_dirty():
        raise DirtyRepoException("Changes to the index or working tree. "
                                 "Commit them first.")
    if check_untracked:
        if repo.untracked_files:
            raise DirtyRepoException("Untracked files. Commit them first.")
Check that the git working directory is in a clean state and raise exceptions if not. :path_to_repo: The path of the git repo
def _generate_for_subfolder(self, sid):
    '''
    Generate report for a subfolder.

    :param sid: The subfolder id; assumed valid
    '''
    # TODO: the following assumes subfolder names can be constructed from a
    # subfolder id, which might not be the case in the future.
    name = self._sanitise_sheetname(uni(Folders.id_to_name(sid)))
    ws = self.workbook.add_worksheet(name)
    fmt = self.formats

    ws.write("A1", "Dossier report", fmt['title'])
    ws.write("A2", "%s | %s" % (uni(self.folder_name), name))

    # Column dimensions
    ws.set_column('A:A', 37)
    ws.set_column('B:B', 37)
    ws.set_column('C:C', 37)
    ws.set_column('D:D', 8)
    ws.set_column('E:E', 30)
    ws.set_column('F:F', 37)

    # Header
    ws.write("A4", "Id", fmt['header'])
    ws.write("B4", "URL", fmt['header'])
    ws.write("C4", "Subtopic Id", fmt['header'])
    ws.write("D4", "Type", fmt['header'])
    ws.write("E4", "Content", fmt['header'])
    ws.write("F4", "Image URL", fmt['header'])

    # TODO: we probably want to wrap the following in a try-catch block, in
    # case the call to folders.subtopics fails.
    row = 4
    for i in subtopics(self.store, self.folders, self.folder_id, sid,
                       self.user):
        Item.construct(self, i).generate_to(ws, row)
        row += 1
Generate report for a subfolder. :param sid: The subfolder id; assumed valid
def dispatch(restricted=False):
    """
    Dispatch registered handlers.

    When dispatching in restricted mode, only matching hook handlers are
    executed.

    Handlers are dispatched according to the following rules:

    * Handlers are repeatedly tested and invoked in iterations, until the
      system settles into quiescence (that is, until no new handlers match
      to be invoked).

    * In the first iteration, :func:`@hook <charms.reactive.decorators.hook>`
      and :func:`@action <charms.reactive.decorators.action>` handlers will
      be invoked, if they match.

    * In subsequent iterations, other handlers are invoked, if they match.

    * Added flags will not trigger new handlers until the next iteration,
      to ensure that chained flags are invoked in a predictable order.

    * Removed flags will cause the current set of matched handlers to be
      re-tested, to ensure that no handler is invoked after its matching
      flag has been removed.

    * Other than the guarantees mentioned above, the order in which matching
      handlers are invoked is undefined.

    * Flags are preserved between hook and action invocations, and all
      matching handlers are re-invoked for every hook and action. There are
      :doc:`decorators <charms.reactive.decorators>` and
      :doc:`helpers <charms.reactive.helpers>` to prevent unnecessary
      reinvocations, such as :func:`~charms.reactive.decorators.only_once`.
    """
    FlagWatch.reset()

    def _test(to_test):
        return list(filter(lambda h: h.test(), to_test))

    def _invoke(to_invoke):
        while to_invoke:
            unitdata.kv().set('reactive.dispatch.removed_state', False)
            for handler in list(to_invoke):
                to_invoke.remove(handler)
                hookenv.log('Invoking reactive handler: %s' % handler.id(),
                            level=hookenv.INFO)
                handler.invoke()
                if unitdata.kv().get('reactive.dispatch.removed_state'):
                    # re-test remaining handlers
                    to_invoke = _test(to_invoke)
                    break
            FlagWatch.commit()

    tracer().start_dispatch()
    # When in restricted context, only run hooks for that context.
    if restricted:
        unitdata.kv().set('reactive.dispatch.phase', 'restricted')
        hook_handlers = _test(Handler.get_handlers())
        tracer().start_dispatch_phase('restricted', hook_handlers)
        _invoke(hook_handlers)
        return

    unitdata.kv().set('reactive.dispatch.phase', 'hooks')
    hook_handlers = _test(Handler.get_handlers())
    tracer().start_dispatch_phase('hooks', hook_handlers)
    _invoke(hook_handlers)

    unitdata.kv().set('reactive.dispatch.phase', 'other')
    for i in range(100):
        FlagWatch.iteration(i)
        other_handlers = _test(Handler.get_handlers())
        if i == 0:
            tracer().start_dispatch_phase('other', other_handlers)
        tracer().start_dispatch_iteration(i, other_handlers)
        if not other_handlers:
            break
        _invoke(other_handlers)

    FlagWatch.reset()
Dispatch registered handlers. When dispatching in restricted mode, only matching hook handlers are executed. Handlers are dispatched according to the following rules: * Handlers are repeatedly tested and invoked in iterations, until the system settles into quiescence (that is, until no new handlers match to be invoked). * In the first iteration, :func:`@hook <charms.reactive.decorators.hook>` and :func:`@action <charms.reactive.decorators.action>` handlers will be invoked, if they match. * In subsequent iterations, other handlers are invoked, if they match. * Added flags will not trigger new handlers until the next iteration, to ensure that chained flags are invoked in a predictable order. * Removed flags will cause the current set of matched handlers to be re-tested, to ensure that no handler is invoked after its matching flag has been removed. * Other than the guarantees mentioned above, the order in which matching handlers are invoked is undefined. * Flags are preserved between hook and action invocations, and all matching handlers are re-invoked for every hook and action. There are :doc:`decorators <charms.reactive.decorators>` and :doc:`helpers <charms.reactive.helpers>` to prevent unnecessary reinvocations, such as :func:`~charms.reactive.decorators.only_once`.
def random_walk(network):
    """Take a random walk from a source.

    Start at a node randomly selected from those that receive input
    from a source. At each step, transmit to a randomly-selected
    downstream node.
    """
    latest = network.latest_transmission_recipient()
    if (not network.transmissions() or latest is None):
        sender = random.choice(network.nodes(type=Source))
    else:
        sender = latest
    receiver = random.choice(sender.neighbors(direction="to", type=Agent))
    sender.transmit(to_whom=receiver)
Take a random walk from a source. Start at a node randomly selected from those that receive input from a source. At each step, transmit to a randomly-selected downstream node.
def num_dml_affected_rows(self):
    """Return the number of DML rows affected by the job.

    See:
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.numDmlAffectedRows

    :rtype: int or None
    :returns: number of DML rows affected by the job, or None if job is not
              yet complete.
    """
    result = self._job_statistics().get("numDmlAffectedRows")
    if result is not None:
        result = int(result)
    return result
Return the number of DML rows affected by the job. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.numDmlAffectedRows :rtype: int or None :returns: number of DML rows affected by the job, or None if job is not yet complete.
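A minimal usage sketch with google-cloud-bigquery; the dataset and table names are placeholders:

from google.cloud import bigquery

client = bigquery.Client()
job = client.query("UPDATE my_dataset.my_table SET active = FALSE WHERE age > 90")
job.result()  # wait for completion; the property is None until the job finishes
print(job.num_dml_affected_rows)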
def bbox(width=1.0, height=1.0, depth=1.0):
    """
    Generates a bounding box with (0.0, 0.0, 0.0) as the center.
    This is simply a box with ``LINE_STRIP`` as draw mode.

    Keyword Args:
        width (float): Width of the box
        height (float): Height of the box
        depth (float): Depth of the box

    Returns:
        A :py:class:`demosys.opengl.vao.VAO` instance
    """
    width, height, depth = width / 2.0, height / 2.0, depth / 2.0
    pos = numpy.array([
        width, -height, depth,
        width, height, depth,
        -width, -height, depth,
        width, height, depth,
        -width, height, depth,
        -width, -height, depth,
        width, -height, -depth,
        width, height, -depth,
        width, -height, depth,
        width, height, -depth,
        width, height, depth,
        width, -height, depth,
        width, -height, -depth,
        width, -height, depth,
        -width, -height, depth,
        width, -height, -depth,
        -width, -height, depth,
        -width, -height, -depth,
        -width, -height, depth,
        -width, height, depth,
        -width, height, -depth,
        -width, -height, depth,
        -width, height, -depth,
        -width, -height, -depth,
        width, height, -depth,
        width, -height, -depth,
        -width, -height, -depth,
        width, height, -depth,
        -width, -height, -depth,
        -width, height, -depth,
        width, height, -depth,
        -width, height, -depth,
        width, height, depth,
        -width, height, -depth,
        -width, height, depth,
        width, height, depth,
    ], dtype=numpy.float32)

    vao = VAO("geometry:cube", mode=moderngl.LINE_STRIP)
    vao.buffer(pos, '3f', ["in_position"])
    return vao
Generates a bounding box with (0.0, 0.0, 0.0) as the center. This is simply a box with ``LINE_STRIP`` as draw mode. Keyword Args: width (float): Width of the box height (float): Height of the box depth (float): Depth of the box Returns: A :py:class:`demosys.opengl.vao.VAO` instance
def valid_processor_options(processors=None):
    """
    Return a list of unique valid options for a list of image processors
    (and/or source generators)
    """
    if processors is None:
        processors = [
            dynamic_import(p) for p in
            tuple(settings.THUMBNAIL_PROCESSORS) +
            tuple(settings.THUMBNAIL_SOURCE_GENERATORS)]
    valid_options = set(['size', 'quality', 'subsampling'])
    for processor in processors:
        args = (inspect.getfullargspec(processor)[0] if six.PY3
                else inspect.getargspec(processor)[0])
        # Add all arguments apart from the first (the source image).
        valid_options.update(args[1:])
    return list(valid_options)
Return a list of unique valid options for a list of image processors (and/or source generators)
def training_data(job_id):
    '''Returns training_examples for a given job_id from offset to limit

    If full_info parameter is greater than 0, will return extra architecture
    info,

    GET /jobs/139/vectors?offset=0&limit=10&full_info=1
    {
        "labeled_vectors": [{"vector":{"indices": {"0": 1}, "reductions": 3}, "label":0},
                            {"vector":{"indices": {"1": 1}, "reductions": 3}, "label":1},
                            ...],
        "vector_length": 3,  # non-negative int or -1 if vector length is inconsistent
        "num_labeled_vectors": 1600000,  # non-negative int
        "num_classes": 2,  # pos integer, probably 2 or more
    }
    '''
    offset = request.args.get('offset', 0)
    limit = request.args.get('limit', 0)
    cur.execute('SELECT vector,label FROM vectors WHERE job_id=%s OFFSET %s LIMIT %s',
                (job_id, offset, limit))
    training_examples = [{'vector': v, 'label': l} for v, l in cur]
    data = {
        'labeled_vectors': training_examples
    }
    if int(request.args.get('full_info', 0)) > 0:
        cur.execute("SELECT vector->>'reductions' AS num_reductions FROM vectors WHERE job_id=%s GROUP BY num_reductions",
                    (job_id,))
        unique_num_reductions = cur.fetchall()  # [[5000]]
        if len(unique_num_reductions) > 1:
            # the vector length for this job is inconsistent! set vector_length
            # to -1
            data['vector_length'] = -1
        else:
            data['vector_length'] = unique_num_reductions[0][0]
        cur.execute("SELECT count(*) FROM vectors WHERE job_id=%s", (job_id,))
        data['num_labeled_vectors'] = cur.fetchone()[0]
        cur.execute("SELECT count(*) FROM (SELECT label FROM vectors WHERE job_id=%s GROUP BY label) AS all_vecs_for_job",
                    (job_id,))
        data['num_classes'] = cur.fetchone()[0]
    return jsonify(data)
Returns training_examples for a given job_id from offset to limit If full_info parameter is greater than 0, will return extra architecture info, GET /jobs/139/vectors?offset=0&limit=10&full_info=1 { "labeled_vectors": [{"vector":{"indices": {"0": 1}, "reductions": 3}, "label":0}, {"vector":{"indices": {"1": 1}, "reductions": 3}, "label":1}, ...], "vector_length": 3, # non-negative int or -1 if vector length is inconsistent "num_labeled_vectors": 1600000, # non-negative int "num_classes": 2, # pos integer, probably 2 or more }
def flux_balance(model, reaction, tfba, solver):
    """Run flux balance analysis on the given model.

    Yields the reaction id and flux value for each reaction in the model.

    This is a convenience function for setting up and running the
    FluxBalanceProblem. If the FBA is solved for more than one parameter
    it is recommended to setup and reuse the FluxBalanceProblem manually
    for a speed up.

    This is an implementation of flux balance analysis (FBA) as described
    in [Orth10]_ and [Fell86]_.

    Args:
        model: MetabolicModel to solve.
        reaction: Reaction to maximize. If a dict is given, this instead
            represents the objective function weights on each reaction.
        tfba: If True enable thermodynamic constraints.
        solver: LP solver instance to use.

    Returns:
        Iterator over reaction ID and reaction flux pairs.
    """
    fba = _get_fba_problem(model, tfba, solver)
    fba.maximize(reaction)
    for reaction in model.reactions:
        yield reaction, fba.get_flux(reaction)
Run flux balance analysis on the given model. Yields the reaction id and flux value for each reaction in the model. This is a convenience function for setting up and running the FluxBalanceProblem. If the FBA is solved for more than one parameter it is recommended to setup and reuse the FluxBalanceProblem manually for a speed up. This is an implementation of flux balance analysis (FBA) as described in [Orth10]_ and [Fell86]_. Args: model: MetabolicModel to solve. reaction: Reaction to maximize. If a dict is given, this instead represents the objective function weights on each reaction. tfba: If True enable thermodynamic constraints. solver: LP solver instance to use. Returns: Iterator over reaction ID and reaction flux pairs.
def princomp(x):
    """Determine the principal components of a vector of measurements

    Determine the principal components of a vector of measurements
    x should be a M x N numpy array composed of M observations of n variables
    The output is:
    coeffs - the NxN correlation matrix that can be used to transform x into
             its components

    The code for this function is based on "A Tutorial on Principal Component
    Analysis", Shlens, 2005 http://www.snl.salk.edu/~shlens/pub/notes/pca.pdf
    (unpublished)
    """
    (M, N) = x.shape
    Mean = x.mean(0)
    y = x - Mean
    cov = numpy.dot(y.transpose(), y) / (M - 1)
    (V, PC) = numpy.linalg.eig(cov)
    order = (-V).argsort()
    coeff = PC[:, order]
    return coeff
Determine the principal components of a vector of measurements Determine the principal components of a vector of measurements x should be a M x N numpy array composed of M observations of n variables The output is: coeffs - the NxN correlation matrix that can be used to transform x into its components The code for this function is based on "A Tutorial on Principal Component Analysis", Shlens, 2005 http://www.snl.salk.edu/~shlens/pub/notes/pca.pdf (unpublished)
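A minimal usage sketch: recover the principal axes of 100 observations of 3 variables and project onto them (assumes numpy is imported where princomp is defined):

import numpy

x = numpy.random.randn(100, 3)
coeff = princomp(x)
projected = numpy.dot(x - x.mean(0), coeff)  # observations in component space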
def edit_asn(self, auth, asn, attr):
    """ Edit AS number

        * `auth` [BaseAuth]
            AAA options.
        * `asn` [integer]
            AS number to edit.
        * `attr` [asn_attr]
            New AS attributes.

        This is the documentation of the internal backend function. It's
        exposed over XML-RPC, please also see the XML-RPC documentation for
        :py:func:`nipap.xmlrpc.NipapXMLRPC.edit_asn` for full understanding.
    """
    self._logger.debug("edit_asn called; asn: %s attr: %s" %
                       (unicode(asn), unicode(attr)))

    # sanity check - do we have all attributes?
    req_attr = []
    allowed_attr = ['name', ]
    self._check_attr(attr, req_attr, allowed_attr)

    asns = self.list_asn(auth, asn)

    where, params1 = self._expand_asn_spec(asn)
    update, params2 = self._sql_expand_update(attr)
    params = dict(params2.items() + params1.items())

    sql = "UPDATE ip_net_asn SET " + update + " WHERE " + where
    sql += " RETURNING *"

    self._execute(sql, params)
    updated_asns = []
    for row in self._curs_pg:
        updated_asns.append(dict(row))

    # write to audit table
    for a in asns:
        audit_params = {
            'username': auth.username,
            'authenticated_as': auth.authenticated_as,
            'full_name': auth.full_name,
            'authoritative_source': auth.authoritative_source
        }
        audit_params['description'] = 'Edited ASN %s attr: %s' % (unicode(a['asn']), unicode(attr))

        sql, params = self._sql_expand_insert(audit_params)
        self._execute('INSERT INTO ip_net_log %s' % sql, params)

    return updated_asns
Edit AS number * `auth` [BaseAuth] AAA options. * `asn` [integer] AS number to edit. * `attr` [asn_attr] New AS attributes. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.edit_asn` for full understanding.
def html_print_file(self, catalog, destination):
    """
    Prints text_file in html.

    :param catalog: text file you wish to pretty print
    :param destination: where you wish to save the HTML data
    :return: output in html_file.html.
    """
    with open(destination, mode='r+', encoding='utf8') as t_f:
        for text in catalog:
            pnum = catalog[text]['pnum']
            edition = catalog[text]['edition']
            metadata = '<br>\n'.join(catalog[text]['metadata'])
            transliteration = '<br>\n'.join(catalog[text]['transliteration'])
            normalization = '<br>\n'.join(catalog[text]['normalization'])
            translation = '<br>\n'.join(catalog[text]['translation'])
            self.html_file = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{edition}</title>
</head>
<body><table cellpadding="10"; border="1">
<tr><th>
<h2>{edition}<br>{pnum}</h2>
</th><th>
<h3>transliteration</h3>
</th><th>
<h3>normalization</h3>
</th><th>
<h3>translation</h3>
</tr><tr><td>
{metadata}</td><td>
<p>{trans}
</td><td>
<p>{norm}
</td><td>
<font size='2'>
{translation}
</font></td></tr>
</table>
<br>
</body>
</html>""".format(
                pnum=pnum, edition=edition, metadata=metadata,
                trans=transliteration, norm=normalization,
                translation=translation)
            t_f.write(self.html_file)
Prints text_file in html. :param catalog: text file you wish to pretty print :param destination: where you wish to save the HTML data :return: output in html_file.html.
def startElement (self, name, attrs): '''if there's a start method for this element, call it ''' func = getattr(self, 'start_' + name, None) if func: func(attrs)
if there's a start method for this element, call it
def delete_view(self, request, object_id, extra_context=None):
    """
    Overrides the default to enable redirecting to the directory view after
    deletion of a folder.

    We need to fetch the object and find out who the parent is
    before super, because super will delete the object and make it
    impossible to find out the parent folder to redirect to.
    """
    parent_folder = None
    try:
        obj = self.queryset(request).get(pk=unquote(object_id))
        parent_folder = obj.parent
    except self.model.DoesNotExist:
        obj = None

    r = super(FolderAdmin, self).delete_view(
        request=request, object_id=object_id,
        extra_context=extra_context)

    url = r.get("Location", None)
    if url in ["../../../../", "../../"] or url == self._get_post_url(obj):
        if parent_folder:
            url = reverse('admin:filer-directory_listing',
                          kwargs={'folder_id': parent_folder.id})
        else:
            url = reverse('admin:filer-directory_listing-root')
        url = "%s%s%s" % (url, popup_param(request),
                          selectfolder_param(request, "&"))
        return HttpResponseRedirect(url)
    return r
Overrides the default to enable redirecting to the directory view after
deletion of a folder.

We need to fetch the object and find out who the parent is
before super, because super will delete the object and make it
impossible to find out the parent folder to redirect to.
def notebook_to_rst(npth, rpth, rdir, cr=None):
    """
    Convert notebook at `npth` to rst document at `rpth`, in directory
    `rdir`. Parameter `cr` is a CrossReferenceLookup object.
    """
    # Read the notebook file
    ntbk = nbformat.read(npth, nbformat.NO_CONVERT)
    # Convert the notebook object to an rst document at rpth
    notebook_object_to_rst(ntbk, rpth, rdir, cr)
Convert notebook at `npth` to rst document at `rpth`, in directory `rdir`. Parameter `cr` is a CrossReferenceLookup object.
def requireCompatibleAPI(): """If PyQt4's API should be configured to be compatible with PySide's (i.e. QString and QVariant should not be explicitly exported, cf. documentation of sip.setapi()), call this function to check that the PyQt4 was properly imported. (It will always be configured this way by this module, but it could have been imported before we got a hand on doing so.) """ if 'PyQt4.QtCore' in sys.modules: import sip for api in ('QVariant', 'QString'): if sip.getapi(api) != 2: raise RuntimeError('%s API already set to V%d, but should be 2' % (api, sip.getapi(api)))
If PyQt4's API should be configured to be compatible with PySide's (i.e. QString and QVariant should not be explicitly exported, cf. documentation of sip.setapi()), call this function to check that the PyQt4 was properly imported. (It will always be configured this way by this module, but it could have been imported before we got a hand on doing so.)
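For reference, the compatible configuration this function verifies is the one selected with sip.setapi() before PyQt4 is first imported; a minimal sketch:

import sip
sip.setapi('QString', 2)    # must run before the first `import PyQt4.QtCore`
sip.setapi('QVariant', 2)
from PyQt4 import QtCore    # noqa: F401 -- now requireCompatibleAPI() passes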
def get_info( self, userSpecifier, **kwargs ): """ Fetch the user information for the specified user. This endpoint is intended to be used by the user themself to obtain their own information. Args: userSpecifier: The User Specifier Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/users/{userSpecifier}' ) request.set_path_param( 'userSpecifier', userSpecifier ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('userInfo') is not None: parsed_body['userInfo'] = \ self.ctx.user.UserInfo.from_dict( jbody['userInfo'], self.ctx ) elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "403": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
Fetch the user information for the specified user. This endpoint is intended to be used by the user themself to obtain their own information. Args: userSpecifier: The User Specifier Returns: v20.response.Response containing the results from submitting the request
def get_text_for_repeated_menu_item(
    self, request=None, current_site=None, original_menu_tag='', **kwargs
):
    """Return a string to use as 'text' for this page when it is being
    included as a 'repeated' menu item in a menu. You might want to
    override this method if you're creating a multilingual site and you
    have different translations of 'repeated_item_text' that you wish
    to surface."""
    source_field_name = settings.PAGE_FIELD_FOR_MENU_ITEM_TEXT
    return self.repeated_item_text or getattr(
        self, source_field_name, self.title
    )
Return a string to use as 'text' for this page when it is being included
as a 'repeated' menu item in a menu. You might want to override this
method if you're creating a multilingual site and you have different
translations of 'repeated_item_text' that you wish to surface.
def disable_on_env(func): """Disable the ``func`` called if its name is present in ``VALIDATORS_DISABLED``. :param func: The function/validator to be disabled. :type func: callable :returns: If disabled, the ``value`` (first positional argument) passed to ``func``. If enabled, the result of ``func``. """ @wraps(func) def func_wrapper(*args, **kwargs): # pylint: disable=C0111, C0103 function_name = func.__name__ VALIDATORS_DISABLED = os.getenv('VALIDATORS_DISABLED', '') disabled_functions = [x.strip() for x in VALIDATORS_DISABLED.split(',')] force_run = kwargs.get('force_run', False) try: value = args[0] except IndexError: raise ValidatorUsageError('no value was supplied') if function_name in disabled_functions and not force_run: return value else: updated_kwargs = {key : kwargs[key] for key in kwargs if key != 'force_run'} return func(*args, **updated_kwargs) return func_wrapper
Disable the ``func`` called if its name is present in ``VALIDATORS_DISABLED``. :param func: The function/validator to be disabled. :type func: callable :returns: If disabled, the ``value`` (first positional argument) passed to ``func``. If enabled, the result of ``func``.
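A usage sketch; the validator name `is_email` and its behaviour are hypothetical, but the environment-variable mechanics match the wrapper above:

import os

@disable_on_env
def is_email(value, **kwargs):
    # hypothetical validator: raises on invalid input, else returns the value
    if '@' not in value:
        raise ValueError('not an email address')
    return value

os.environ['VALIDATORS_DISABLED'] = 'is_email'
assert is_email('oops') == 'oops'        # disabled: value passed straight through
try:
    is_email('oops', force_run=True)     # force_run overrides the disable list
except ValueError:
    pass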
def imbox(xy, w, h, angle=0.0, **kwargs): """ draw boundary box :param xy: start index xy (ji) :param w: width :param h: height :param angle: :param kwargs: :return: """ from matplotlib.patches import Rectangle return imbound(Rectangle, xy, w, h, angle, **kwargs)
draw boundary box :param xy: start index xy (ji) :param w: width :param h: height :param angle: :param kwargs: :return:
def load_files(files, tag=None, sat_id=None, altitude_bin=None): '''Loads a list of COSMIC data files, supplied by user. Returns a list of dicts, a dict for each file. ''' output = [None]*len(files) drop_idx = [] for (i,file) in enumerate(files): try: #data = netCDF4.Dataset(file) data = netcdf_file(file, mode='r', mmap=False) # build up dictionary will all ncattrs new = {} # get list of file attributes #ncattrsList = data.ncattrs() ncattrsList = data._attributes.keys() for d in ncattrsList: new[d] = data._attributes[d] #data.getncattr(d) # load all of the variables in the netCDF loadedVars={} keys = data.variables.keys() for key in keys: if data.variables[key][:].dtype.byteorder != '=': loadedVars[key] = data.variables[key][:].byteswap().newbyteorder() else: loadedVars[key] = data.variables[key][:] new['profiles'] = pysat.DataFrame(loadedVars) output[i] = new data.close() except RuntimeError: # some of the files have zero bytes, which causes a read error # this stores the index of these zero byte files so I can drop # the Nones the gappy file leaves behind drop_idx.append(i) # drop anything that came from the zero byte files drop_idx.reverse() for i in drop_idx: del output[i] if tag == 'ionprf': if altitude_bin is not None: for out in output: out['profiles'].index = (out['profiles']['MSL_alt']/altitude_bin).round().values*altitude_bin out['profiles'] = out['profiles'].groupby(out['profiles'].index.values).mean() else: for out in output: out['profiles'].index = out['profiles']['MSL_alt'] return output
Loads a list of COSMIC data files, supplied by user. Returns a list of dicts, a dict for each file.
def write_data(filename, data, data_format=None, compress=False, add=False): """ Write image data to file Function to write image data to specified file. If file format is not provided explicitly, it is guessed from the filename extension. If format is TIFF, geo information and compression can be optionally added. :param filename: name of file to write data to :type filename: str :param data: image data to write to file :type data: numpy array :param data_format: format of output file. Default is ``None`` :type data_format: MimeType :param compress: whether to compress data or not. Default is ``False`` :type compress: bool :param add: whether to append to existing text file or not. Default is ``False`` :type add: bool :raises: exception if numpy format is not supported or file cannot be written """ create_parent_folder(filename) if not isinstance(data_format, MimeType): data_format = get_data_format(filename) if data_format.is_tiff_format(): return write_tiff_image(filename, data, compress) if data_format.is_image_format(): return write_image(filename, data) if data_format is MimeType.TXT: return write_text(filename, data, add=add) try: return { MimeType.CSV: write_csv, MimeType.JSON: write_json, MimeType.XML: write_xml, MimeType.GML: write_xml }[data_format](filename, data) except KeyError: raise ValueError('Writing data format .{} is not supported'.format(data_format.value))
Write image data to file Function to write image data to specified file. If file format is not provided explicitly, it is guessed from the filename extension. If format is TIFF, geo information and compression can be optionally added. :param filename: name of file to write data to :type filename: str :param data: image data to write to file :type data: numpy array :param data_format: format of output file. Default is ``None`` :type data_format: MimeType :param compress: whether to compress data or not. Default is ``False`` :type compress: bool :param add: whether to append to existing text file or not. Default is ``False`` :type add: bool :raises: exception if numpy format is not supported or file cannot be written
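A hedged usage sketch (the file names are placeholders; the format is inferred from the extension as described above):

import numpy as np

image = np.zeros((64, 64, 3), dtype=np.uint8)
write_data('results/scene.tiff', image, compress=True)   # TIFF branch, compressed
write_data('results/notes.txt', 'first run', add=False)  # plain text, overwrite
write_data('results/notes.txt', 'second run', add=True)  # plain text, append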
def flatten(nested_list):
    '''converts a list-of-lists to a single flat list'''
    return_list = []
    for i in nested_list:
        if isinstance(i, list):
            return_list += flatten(i)
        else:
            return_list.append(i)
    return return_list
converts a list-of-lists to a single flat list
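For example (only list instances are descended into; tuples and strings are appended as-is, and nesting depth is bounded only by the recursion limit):

assert flatten([1, [2, [3, 4]], [5]]) == [1, 2, 3, 4, 5]
assert flatten([(1, 2), ['a', ['b']]]) == [(1, 2), 'a', 'b']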
def _win32_junction(path, link, verbose=0):
    """
    On older (pre 10) versions of windows we need admin privileges to make
    symlinks, however junctions seem to work.

    For paths we do a junction (softlink) and for files we use a hard link

    CommandLine:
        python -m ubelt._win32_links _win32_junction

    Example:
        >>> # xdoc: +REQUIRES(WIN32)
        >>> import ubelt as ub
        >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction')
        >>> ub.delete(root)
        >>> ub.ensuredir(root)
        >>> fpath = join(root, 'fpath.txt')
        >>> dpath = join(root, 'dpath')
        >>> fjunc = join(root, 'fjunc.txt')
        >>> djunc = join(root, 'djunc')
        >>> ub.touch(fpath)
        >>> ub.ensuredir(dpath)
        >>> ub.ensuredir(join(root, 'djunc_fake'))
        >>> ub.ensuredir(join(root, 'djunc_fake with space'))
        >>> ub.touch(join(root, 'djunc_fake with space file'))
        >>> _win32_junction(fpath, fjunc)
        >>> _win32_junction(dpath, djunc)
        >>> # thank god colons are not allowed
        >>> djunc2 = join(root, 'djunc2 [with pathological attrs]')
        >>> _win32_junction(dpath, djunc2)
        >>> _win32_is_junction(djunc)
        >>> ub.writeto(join(djunc, 'afile.txt'), 'foo')
        >>> assert ub.readfrom(join(dpath, 'afile.txt')) == 'foo'
        >>> ub.writeto(fjunc, 'foo')
    """
    # junctions store absolute paths
    path = os.path.abspath(path)
    link = os.path.abspath(link)

    from ubelt import util_cmd
    if os.path.isdir(path):
        # try using a junction (soft link)
        if verbose:
            print('... as soft link')
        # TODO: what is the windows api for this?
        command = 'mklink /J "{}" "{}"'.format(link, path)
    else:
        # try using a hard link
        if verbose:
            print('... as hard link')
        # command = 'mklink /H "{}" "{}"'.format(link, path)
        try:
            jwfs.link(path, link)  # this seems to be allowed
        except Exception:
            print('Failed to hardlink link={} to path={}'.format(link, path))
            raise
        command = None

    if command is not None:
        info = util_cmd.cmd(command, shell=True)
        if info['ret'] != 0:
            from ubelt import util_format
            print('Failed command:')
            print(info['command'])
            print(util_format.repr2(info, nl=1))
            raise OSError(str(info))
    return link
On older (pre 10) versions of windows we need admin privileges to make
symlinks, however junctions seem to work.

For paths we do a junction (softlink) and for files we use a hard link

CommandLine:
    python -m ubelt._win32_links _win32_junction

Example:
    >>> # xdoc: +REQUIRES(WIN32)
    >>> import ubelt as ub
    >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction')
    >>> ub.delete(root)
    >>> ub.ensuredir(root)
    >>> fpath = join(root, 'fpath.txt')
    >>> dpath = join(root, 'dpath')
    >>> fjunc = join(root, 'fjunc.txt')
    >>> djunc = join(root, 'djunc')
    >>> ub.touch(fpath)
    >>> ub.ensuredir(dpath)
    >>> ub.ensuredir(join(root, 'djunc_fake'))
    >>> ub.ensuredir(join(root, 'djunc_fake with space'))
    >>> ub.touch(join(root, 'djunc_fake with space file'))
    >>> _win32_junction(fpath, fjunc)
    >>> _win32_junction(dpath, djunc)
    >>> # thank god colons are not allowed
    >>> djunc2 = join(root, 'djunc2 [with pathological attrs]')
    >>> _win32_junction(dpath, djunc2)
    >>> _win32_is_junction(djunc)
    >>> ub.writeto(join(djunc, 'afile.txt'), 'foo')
    >>> assert ub.readfrom(join(dpath, 'afile.txt')) == 'foo'
    >>> ub.writeto(fjunc, 'foo')
def _check_nonlocal_and_global(self, node): """Check that a name is both nonlocal and global.""" def same_scope(current): return current.scope() is node from_iter = itertools.chain.from_iterable nonlocals = set( from_iter( child.names for child in node.nodes_of_class(astroid.Nonlocal) if same_scope(child) ) ) if not nonlocals: return global_vars = set( from_iter( child.names for child in node.nodes_of_class(astroid.Global) if same_scope(child) ) ) for name in nonlocals.intersection(global_vars): self.add_message("nonlocal-and-global", args=(name,), node=node)
Check that a name is both nonlocal and global.
def drawPoints(points, bg=','):
    """A small debug function that takes an iterable of (x, y) integer tuples
    and draws them to the screen."""
    # Note: I set bg to ',' instead of '.' because using ... in the docstrings
    # confuses doctest and makes it think it's Python's secondary ... prompt,
    # causing doctest errors.
    import sys
    points = list(points)
    try:
        points = [(int(x), int(y)) for x, y in points]
    except (TypeError, ValueError):
        raise PyBresenhamException('points must only contain (x, y) numeric tuples')

    # Calculate size of the character grid from the given points.
    minx = min([x for x, y in points])
    maxx = max([x for x, y in points])
    miny = min([y for x, y in points])
    maxy = max([y for x, y in points])

    charGrid = [[' '] * (maxy - miny + 1) for i in range(maxx - minx + 1)]

    # Draw O characters to the grid at the given points.
    for x, y in points:
        charGrid[x - minx][y - miny] = 'O'

    # Print out the character grid.
    for y in range(len(charGrid[0])):
        for x in range(len(charGrid)):
            if charGrid[x][y] in (None, ' '):
                charToDraw = bg
            else:
                charToDraw = charGrid[x][y]
            sys.stdout.write(charToDraw)
        print()
A small debug function that takes an iterable of (x, y) integer tuples and draws them to the screen.
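For instance, a short diagonal renders like this (output reproduced in comments):

drawPoints([(0, 0), (1, 1), (2, 2), (3, 2)])
# O,,,
# ,O,,
# ,,OO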
def cart_add(self, items, CartId=None, HMAC=None, **kwargs): """CartAdd. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. It is not possible to create an empty cart! example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :param CartId: Id of Cart :param HMAC: HMAC of Cart, see CartCreate for more info :return: An :class:`~.AmazonCart`. """ if not CartId or not HMAC: raise CartException('CartId and HMAC required for CartAdd call') if isinstance(items, dict): items = [items] if len(items) > 10: raise CartException("You can't add more than 10 items at once") offer_id_key_template = 'Item.{0}.OfferListingId' quantity_key_template = 'Item.{0}.Quantity' for i, item in enumerate(items): kwargs[offer_id_key_template.format(i)] = item['offer_id'] kwargs[quantity_key_template.format(i)] = item['quantity'] response = self.api.CartAdd(CartId=CartId, HMAC=HMAC, **kwargs) root = objectify.fromstring(response) new_cart = AmazonCart(root) self._check_for_cart_error(new_cart) return new_cart
CartAdd. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. It is not possible to create an empty cart! example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :param CartId: Id of Cart :param HMAC: HMAC of Cart, see CartCreate for more info :return: An :class:`~.AmazonCart`.
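A hedged usage sketch, assuming a companion CartCreate call that yields an AmazonCart exposing cart_id and hmac attributes (the offer ids are placeholders):

cart = amazon.cart_create({'offer_id': 'EXAMPLE_OFFER_LISTING_ID', 'quantity': 1})
cart = amazon.cart_add(
    [{'offer_id': 'ANOTHER_OFFER_LISTING_ID', 'quantity': 2}],
    CartId=cart.cart_id,
    HMAC=cart.hmac,
)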
def _save_state(self): """ Helper context manager for :meth:`buffer` which saves the whole state. This is broken out in a separate method for readability and tested indirectly by testing :meth:`buffer`. """ ns_prefixes_floating_in = copy.copy(self._ns_prefixes_floating_in) ns_prefixes_floating_out = copy.copy(self._ns_prefixes_floating_out) ns_decls_floating_in = copy.copy(self._ns_decls_floating_in) curr_ns_map = copy.copy(self._curr_ns_map) ns_map_stack = copy.copy(self._ns_map_stack) pending_start_element = self._pending_start_element ns_counter = self._ns_counter # XXX: I have been unable to find a test justifying copying this :/ # for completeness, I’m still doing it ns_auto_prefixes_floating_in = \ copy.copy(self._ns_auto_prefixes_floating_in) try: yield except: # NOQA: E722 self._ns_prefixes_floating_in = ns_prefixes_floating_in self._ns_prefixes_floating_out = ns_prefixes_floating_out self._ns_decls_floating_in = ns_decls_floating_in self._pending_start_element = pending_start_element self._curr_ns_map = curr_ns_map self._ns_map_stack = ns_map_stack self._ns_counter = ns_counter self._ns_auto_prefixes_floating_in = ns_auto_prefixes_floating_in raise
Helper context manager for :meth:`buffer` which saves the whole state. This is broken out in a separate method for readability and tested indirectly by testing :meth:`buffer`.
def _get_node(template, context, name): ''' taken originally from http://stackoverflow.com/questions/2687173/django-how-can-i-get-a-block-from-a-template ''' for node in template: if isinstance(node, BlockNode) and node.name == name: return node.nodelist.render(context) elif isinstance(node, ExtendsNode): return _get_node(node.nodelist, context, name) # raise Exception("Node '%s' could not be found in template." % name) return ""
taken originally from http://stackoverflow.com/questions/2687173/django-how-can-i-get-a-block-from-a-template
def jsonify(symbol): """ returns json format for symbol """ try: # all symbols have a toJson method, try it return json.dumps(symbol.toJson(), indent=' ') except AttributeError: pass return json.dumps(symbol, indent=' ')
returns json format for symbol
def dnld_assc(assc_name, go2obj=None, prt=sys.stdout):
    """Download association from http://geneontology.org/gene-associations."""
    # Example assc_name: "tair.gaf"
    # Download the Association
    dirloc, assc_base = os.path.split(assc_name)
    if not dirloc:
        dirloc = os.getcwd()
    # The original one-liner always evaluated to assc_name because dirloc had
    # just been made non-empty; join the pieces explicitly instead.
    assc_locfile = os.path.join(dirloc, assc_base)
    dnld_annotation(assc_locfile, prt)
    # Read the downloaded association
    assc_orig = read_gaf(assc_locfile, prt)
    if go2obj is None:
        return assc_orig
    # If a GO DAG is provided, use only GO IDs present in the GO DAG
    assc = {}
    goids_dag = set(go2obj.keys())
    for gene, goids_cur in assc_orig.items():
        assc[gene] = goids_cur.intersection(goids_dag)
    return assc
Download association from http://geneontology.org/gene-associations.
def __sort_registry(self, svc_ref): # type: (ServiceReference) -> None """ Sorts the registry, after the update of the sort key of given service reference :param svc_ref: A service reference with a modified sort key """ with self.__svc_lock: if svc_ref not in self.__svc_registry: raise BundleException("Unknown service: {0}".format(svc_ref)) # Remove current references for spec in svc_ref.get_property(OBJECTCLASS): # Use bisect to remove the reference (faster) spec_refs = self.__svc_specs[spec] idx = bisect.bisect_left(spec_refs, svc_ref) del spec_refs[idx] # ... use the new sort key svc_ref.update_sort_key() for spec in svc_ref.get_property(OBJECTCLASS): # ... and insert it again spec_refs = self.__svc_specs[spec] bisect.insort_left(spec_refs, svc_ref)
Sorts the registry, after the update of the sort key of given service reference :param svc_ref: A service reference with a modified sort key
def _at(self, idx): """Returns a view of the array sliced at `idx` in the first dim. This is called through ``x[idx]``. Parameters ---------- idx : int index for slicing the `NDArray` in the first dim. Returns ------- NDArray `NDArray` sharing the memory with the current one sliced at `idx` in the first dim. Examples -------- >>> a = mx.nd.array([[1,2], [3, 4]]) >>> a[1].asnumpy() array([ 3., 4.], dtype=float32) >>> b = mx.nd.array([1, 2, 3, 4]) >>> b[0].asnumpy() array([ 1.], dtype=float32) """ handle = NDArrayHandle() if idx < 0: length = self.shape[0] idx += length if idx < 0: raise IndexError('index %d is out of bounds for axis 0 with size %d' % (idx-length, length)) check_call(_LIB.MXNDArrayAt( self.handle, mx_uint(idx), ctypes.byref(handle))) return NDArray(handle=handle, writable=self.writable)
Returns a view of the array sliced at `idx` in the first dim. This is called through ``x[idx]``. Parameters ---------- idx : int index for slicing the `NDArray` in the first dim. Returns ------- NDArray `NDArray` sharing the memory with the current one sliced at `idx` in the first dim. Examples -------- >>> a = mx.nd.array([[1,2], [3, 4]]) >>> a[1].asnumpy() array([ 3., 4.], dtype=float32) >>> b = mx.nd.array([1, 2, 3, 4]) >>> b[0].asnumpy() array([ 1.], dtype=float32)
def image(request, data):
    """
    Generates identicon image based on passed data.

    Arguments:

    data - Data which should be used for generating an identicon. This data
    will be used in order to create a digest which is used for generating the
    identicon. If the data passed is a hex digest already, the digest will be
    used as-is.

    Returns:
    Identicon image in raw format.
    """
    # Get image width, height, padding, and format from GET parameters, or
    # fall-back to default values from settings.
    try:
        width = int(request.GET.get("w", PYDENTICON_WIDTH))
    except ValueError:
        raise SuspiciousOperation("Identicon width must be a positive integer.")

    try:
        height = int(request.GET.get("h", PYDENTICON_HEIGHT))
    except ValueError:
        raise SuspiciousOperation("Identicon height must be a positive integer.")

    output_format = request.GET.get("f", PYDENTICON_FORMAT)

    try:
        padding = [int(p) for p in request.GET["p"].split(",")]
    except KeyError:
        padding = PYDENTICON_PADDING
    except ValueError:
        raise SuspiciousOperation("Identicon padding must consist out of 4 positive integers separated with commas.")

    if "i" in request.GET:
        inverted = request.GET.get("i")
        if inverted.lower() == "true":
            inverted = True
        elif inverted.lower() == "false":
            inverted = False
        else:
            raise SuspiciousOperation("Inversion parameter must be a boolean (true/false).")
    else:
        inverted = PYDENTICON_INVERT

    # Validate the input parameters.
    if not isinstance(width, int) or width <= 0:
        raise SuspiciousOperation("Identicon width must be a positive integer.")

    if not isinstance(height, int) or height <= 0:
        raise SuspiciousOperation("Identicon height must be a positive integer.")

    if not all([isinstance(p, int) and p >= 0 for p in padding]) or len(padding) != 4:
        raise SuspiciousOperation("Padding must be a 4-element tuple consisting out of positive integers.")

    # Set-up correct content type based on requested identicon format.
    if output_format == "png":
        content_type = "image/png"
    elif output_format == "ascii":
        content_type = "text/plain"
    else:
        # The format specifier belongs outside the string literal; the
        # original raised a message containing the literal text
        # "'%s' % output_format" instead of the actual format value.
        raise SuspiciousOperation("Unsupported identicon format requested - '%s'" % output_format)

    # Initialise a generator.
    generator = Generator(PYDENTICON_ROWS, PYDENTICON_COLUMNS,
                          foreground=PYDENTICON_FOREGROUND,
                          background=PYDENTICON_BACKGROUND,
                          digest=PYDENTICON_DIGEST)

    # Generate the identicon.
    content = generator.generate(data, width, height, padding=padding,
                                 output_format=output_format,
                                 inverted=inverted)

    # Create and return the response.
    response = HttpResponse(content, content_type=content_type)

    return response
Generates identicon image based on passed data. Arguments: data - Data which should be used for generating an identicon. This data will be used in order to create a digest which is used for generating the identicon. If the data passed is a hex digest already, the digest will be used as-is. Returns: Identicon image in raw format.
def extra_reading_spec(self): """Additional data fields to store on disk and their decoders.""" field_names = ("frame_number", "action", "reward", "done") data_fields = { name: tf.FixedLenFeature([1], tf.int64) for name in field_names } decoders = { name: tf.contrib.slim.tfexample_decoder.Tensor(tensor_key=name) for name in field_names } return (data_fields, decoders)
Additional data fields to store on disk and their decoders.
def split_array_like(df, columns=None): #TODO rename TODO if it's not a big performance hit, just make them arraylike? We already indicated the column explicitly (sort of) so... ''' Split cells with array-like values along row axis. Column names are maintained. The index is dropped. Parameters ---------- df : ~pandas.DataFrame Data frame ``df[columns]`` should contain :py:class:`~pytil.numpy.ArrayLike` values. columns : ~typing.Collection[str] or str or None Columns (or column) whose values to split. Defaults to ``df.columns``. Returns ------- ~pandas.DataFrame Data frame with array-like values in ``df[columns]`` split across rows, and corresponding values in other columns repeated. Examples -------- >>> df = pd.DataFrame([[1,[1,2],[1]],[1,[1,2],[3,4,5]],[2,[1],[1,2]]], columns=('check', 'a', 'b')) >>> df check a b 0 1 [1, 2] [1] 1 1 [1, 2] [3, 4, 5] 2 2 [1] [1, 2] >>> split_array_like(df, ['a', 'b']) check a b 0 1 1 1 1 1 2 1 2 1 1 3 3 1 1 4 4 1 1 5 5 1 2 3 6 1 2 4 7 1 2 5 8 2 1 1 9 2 1 2 ''' # TODO could add option to keep_index by using reset_index and eventually # set_index. index names trickery: MultiIndex.names, Index.name. Both can be # None. If Index.name can be None in which case it translates to 'index' or # if that already exists, it translates to 'level_0'. If MultiIndex.names is # None, it translates to level_0,... level_N dtypes = df.dtypes if columns is None: columns = df.columns elif isinstance(columns, str): columns = [columns] for column in columns: expanded = np.repeat(df.values, df[column].apply(len).values, axis=0) expanded[:, df.columns.get_loc(column)] = np.concatenate(df[column].tolist()) df = pd.DataFrame(expanded, columns=df.columns) # keep types unchanged for i, dtype in enumerate(dtypes): df.iloc[:,i] = df.iloc[:,i].astype(dtype) return df
Split cells with array-like values along row axis. Column names are maintained. The index is dropped. Parameters ---------- df : ~pandas.DataFrame Data frame ``df[columns]`` should contain :py:class:`~pytil.numpy.ArrayLike` values. columns : ~typing.Collection[str] or str or None Columns (or column) whose values to split. Defaults to ``df.columns``. Returns ------- ~pandas.DataFrame Data frame with array-like values in ``df[columns]`` split across rows, and corresponding values in other columns repeated. Examples -------- >>> df = pd.DataFrame([[1,[1,2],[1]],[1,[1,2],[3,4,5]],[2,[1],[1,2]]], columns=('check', 'a', 'b')) >>> df check a b 0 1 [1, 2] [1] 1 1 [1, 2] [3, 4, 5] 2 2 [1] [1, 2] >>> split_array_like(df, ['a', 'b']) check a b 0 1 1 1 1 1 2 1 2 1 1 3 3 1 1 4 4 1 1 5 5 1 2 3 6 1 2 4 7 1 2 5 8 2 1 1 9 2 1 2
def getrefnames(idf, objname): """get the reference names for this object""" iddinfo = idf.idd_info dtls = idf.model.dtls index = dtls.index(objname) fieldidds = iddinfo[index] for fieldidd in fieldidds: if 'field' in fieldidd: if fieldidd['field'][0].endswith('Name'): if 'reference' in fieldidd: return fieldidd['reference'] else: return []
get the reference names for this object
def mpub(self, topic, messages, binary=True):
    '''Send multiple messages to a topic. Optionally pack the messages'''
    if binary:
        # Pack and ship the data
        return self.post('mpub', data=pack(messages)[4:],
                         params={'topic': topic, 'binary': True})
    elif any('\n' in m for m in messages):
        # If any of the messages has a newline, then you must use the binary
        # calling format
        raise ClientException(
            'Use `binary` flag in mpub for messages with newlines')
    else:
        # Same endpoint spelling as the binary branch; the original's
        # leading slash ('/mpub') looked like an inconsistency.
        return self.post(
            'mpub', params={'topic': topic}, data='\n'.join(messages))
Send multiple messages to a topic. Optionally pack the messages
def _write_value(self, field_type, value): """ Writes an item of an array :param field_type: Value type :param value: The value itself """ if len(field_type) > 1: # We don't need details for arrays and objects field_type = field_type[0] if field_type == self.TYPE_BOOLEAN: self._writeStruct(">B", 1, (1 if value else 0,)) elif field_type == self.TYPE_BYTE: self._writeStruct(">b", 1, (value,)) elif field_type == self.TYPE_CHAR: self._writeStruct(">H", 1, (ord(value),)) elif field_type == self.TYPE_SHORT: self._writeStruct(">h", 1, (value,)) elif field_type == self.TYPE_INTEGER: self._writeStruct(">i", 1, (value,)) elif field_type == self.TYPE_LONG: self._writeStruct(">q", 1, (value,)) elif field_type == self.TYPE_FLOAT: self._writeStruct(">f", 1, (value,)) elif field_type == self.TYPE_DOUBLE: self._writeStruct(">d", 1, (value,)) elif field_type == self.TYPE_OBJECT or field_type == self.TYPE_ARRAY: if value is None: self.write_null() elif isinstance(value, JavaEnum): self.write_enum(value) elif isinstance(value, (JavaArray, JavaByteArray)): self.write_array(value) elif isinstance(value, JavaObject): self.write_object(value) elif isinstance(value, JavaString): self.write_string(value) elif isinstance(value, str): self.write_blockdata(value) else: raise RuntimeError("Unknown typecode: {0}".format(field_type)) else: raise RuntimeError("Unknown typecode: {0}".format(field_type))
Writes an item of an array :param field_type: Value type :param value: The value itself
def encrypt_to(self, f, mac_bytes=10):
    """ Context manager that yields a file-like object `ef'.  Anything
        written to `ef' will be encrypted for this pubkey and written
        to `f'. """
    ctx = EncryptionContext(f, self.p, mac_bytes)
    yield ctx
    ctx.finish()
Context manager that yields a file-like object `ef'.  Anything written to
`ef' will be encrypted for this pubkey and written to `f'.
def salt_and_pepper_noise(X, v):
    """Apply salt and pepper noise to data in X.

    In other words, `v` elements per sample of X (chosen at random, with
    possible repeats) are set to the minimum or maximum value found in X,
    according to a fair coin flip.

    :param X: array_like, Input data
    :param v: int, number of elements to distort per sample
    :return: transformed data
    """
    X_noise = X.copy()
    n_features = X.shape[1]

    mn = X.min()
    mx = X.max()

    for i, sample in enumerate(X):
        mask = np.random.randint(0, n_features, v)

        for m in mask:
            if np.random.random() < 0.5:
                X_noise[i][m] = mn
            else:
                X_noise[i][m] = mx

    return X_noise
Apply salt and pepper noise to data in X.

In other words, `v` elements per sample of X (chosen at random, with
possible repeats) are set to the minimum or maximum value found in X,
according to a fair coin flip.

:param X: array_like, Input data
:param v: int, number of elements to distort per sample
:return: transformed data
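A minimal demonstration with numpy:

import numpy as np

np.random.seed(1)
X = np.random.rand(5, 20)                # 5 samples, 20 features in (0, 1)
X_noisy = salt_and_pepper_noise(X, v=4)  # up to 4 features per sample (indices may repeat)
assert X_noisy.shape == X.shape
# every distorted entry was forced to the global min or max of X
assert set(np.unique(X_noisy[X_noisy != X])) <= {X.min(), X.max()}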
def load(self): """ Extract tabular data as |TableData| instances from a MediaWiki text object. |load_source_desc_text| :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` ``""`` ``%(key)s`` | This replaced to: | **(1)** ``caption`` mark of the table | **(2)** ``%(format_name)s%(format_id)s`` | if ``caption`` mark not included | in the table. ``%(format_name)s`` ``"mediawiki"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the MediaWiki data is invalid or empty. """ self._validate() self._logger.logging_load() formatter = MediaWikiTableFormatter(self.source) formatter.accept(self) return formatter.to_table_data()
Extract tabular data as |TableData| instances from a MediaWiki text object. |load_source_desc_text| :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` ``""`` ``%(key)s`` | This replaced to: | **(1)** ``caption`` mark of the table | **(2)** ``%(format_name)s%(format_id)s`` | if ``caption`` mark not included | in the table. ``%(format_name)s`` ``"mediawiki"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the MediaWiki data is invalid or empty.
def import_new_atlas_pointings( self, recent=False): """ *Import any new ATLAS pointings from the atlas3/atlas4 databases into the ``atlas_exposures`` table of the Atlas Movers database* **Key Arguments:** - ``recent`` -- only sync the most recent 2 weeks of data (speeds things up) **Return:** - None **Usage:** .. code-block:: python from rockAtlas.bookkeeping import bookkeeper bk = bookkeeper( log=log, settings=settings ) bk.import_new_atlas_pointings() """ self.log.info('starting the ``import_new_atlas_pointings`` method') if recent: mjd = mjdnow( log=self.log ).get_mjd() recent = mjd - 14 recent = " mjd_obs > %(recent)s " % locals() else: recent = "1=1" # SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS3 DATABASE sqlQuery = u""" SELECT `expname`, `dec` as `decDeg`, `exptime` as `exp_time`, `filter`, `mjd_obs` as `mjd`, `ra` as `raDeg`, if(mjd_obs<57855.0,mag5sig-0.75,mag5sig) as `limiting_magnitude`, `object` as `atlas_object_id` from atlas_metadata where %(recent)s and object like "TA%%" order by mjd_obs desc; """ % locals() rows = readquery( log=self.log, sqlQuery=sqlQuery, dbConn=self.atlas3DbConn, quiet=False ) dbSettings = self.settings["database settings"]["atlasMovers"] # TIDY RESULTS BEFORE IMPORT entries = list(rows) if len(rows) > 0: # ADD THE NEW RESULTS TO THE atlas_exposures TABLE insert_list_of_dictionaries_into_database_tables( dbConn=self.atlasMoversDBConn, log=self.log, dictList=entries, dbTableName="atlas_exposures", uniqueKeyList=["expname"], dateModified=False, batchSize=10000, replace=True, dbSettings=dbSettings ) recent = recent.replace("mjd_obs", "mjd") # SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS4 DATABASE sqlQuery = u""" SELECT `obs` as `expname`, `dec` as `decDeg`, `texp` as `exp_time`, `filt` as `filter`, `mjd`, `ra` as `raDeg`, `mag5sig` as `limiting_magnitude`, `obj` as `atlas_object_id` from atlas_metadataddc where %(recent)s and obj like "TA%%" order by mjd desc; """ % locals() rows = readquery( log=self.log, sqlQuery=sqlQuery, dbConn=self.atlas4DbConn, quiet=False ) # TIDY RESULTS BEFORE IMPORT entries = list(rows) if len(rows) > 0: # ADD THE NEW RESULTS TO THE atlas_exposures TABLE insert_list_of_dictionaries_into_database_tables( dbConn=self.atlasMoversDBConn, log=self.log, dictList=entries, dbTableName="atlas_exposures", uniqueKeyList=["expname"], dateModified=False, batchSize=10000, replace=True, dbSettings=dbSettings ) # APPEND HTMIDs TO THE atlas_exposures TABLE add_htm_ids_to_mysql_database_table( raColName="raDeg", declColName="decDeg", tableName="atlas_exposures", dbConn=self.atlasMoversDBConn, log=self.log, primaryIdColumnName="primaryId" ) print "ATLAS pointings synced between ATLAS3/ATLAS4 databases and the ATLAS Movers `atlas_exposures` database table" self.log.info('completed the ``import_new_atlas_pointings`` method') return None
*Import any new ATLAS pointings from the atlas3/atlas4 databases into the ``atlas_exposures`` table of the Atlas Movers database* **Key Arguments:** - ``recent`` -- only sync the most recent 2 weeks of data (speeds things up) **Return:** - None **Usage:** .. code-block:: python from rockAtlas.bookkeeping import bookkeeper bk = bookkeeper( log=log, settings=settings ) bk.import_new_atlas_pointings()
def compress_encoder(inputs, hparams, strides=(2, 2), kernel_size=(3, 3), name=None): """Encoder that compresses 2-D inputs by 2**num_compress_steps. Args: inputs: Tensor of shape [batch, height, width, channels]. hparams: HParams. strides: Tuple, strides for conv block. kernel_size: Tuple, kernel window size for conv block. name: string, variable scope. Returns: Tensor of shape [batch, latent_length, hparams.hidden_size], where latent_length is hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps). """ with tf.variable_scope(name, default_name="compress"): x = inputs for i in range(hparams.num_compress_steps // 2): with tf.variable_scope("compress_conv_%d" % i): y = common_layers.conv_block( common_layers.layer_norm( x, hparams.hidden_size, name="lnorm"), hparams.hidden_size, dilation_rates_and_kernel_sizes=[((1, 1), kernel_size)], strides=strides, padding="SAME", name="compress_conv_%d" % i) y = tf.nn.dropout(y, 1.0 - hparams.dropout) if hparams.do_compress_attend: y = compress_self_attention_layer( x, hparams, name="compress_selfatt_%d" % i) y += x x = y x = residual_block_layer(x, hparams) # If using multiple copies of latents, blow up the hidden size and then # reshape to increase by num_latents. shape_x = common_layers.shape_list(x) x = tf.layers.dense(x, hparams.num_latents * hparams.hidden_size, name=name + "_dense") return tf.reshape(x, [shape_x[0], shape_x[1] * shape_x[2] * hparams.num_latents, hparams.hidden_size])
Encoder that compresses 2-D inputs by 2**num_compress_steps. Args: inputs: Tensor of shape [batch, height, width, channels]. hparams: HParams. strides: Tuple, strides for conv block. kernel_size: Tuple, kernel window size for conv block. name: string, variable scope. Returns: Tensor of shape [batch, latent_length, hparams.hidden_size], where latent_length is hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).
def getObjectWorkflowStates(self):
    """This method is used to populate catalog values

    Returns a dictionary with the workflow state variable as key and the
    workflow state as value.

    :return: {'review_state':'active',...}
    """
    workflow = getToolByName(self, 'portal_workflow')
    states = {}
    for w in workflow.getWorkflowsFor(self):
        state = api.get_workflow_status_of(self, w.state_var)
        states[w.state_var] = state
    return states
This method is used to populate catalog values

Returns a dictionary with the workflow state variable as key and the
workflow state as value.

:return: {'review_state':'active',...}
def _map_term_using_schema(master, path, term, schema_edges):
    """
    IF THE WHERE CLAUSE REFERS TO FIELDS IN THE SCHEMA, THEN EXPAND THEM
    """
    output = FlatList()
    for k, v in term.items():
        dimension = schema_edges[k]
        if isinstance(dimension, Dimension):
            domain = dimension.getDomain()
            if dimension.fields:
                if is_data(dimension.fields):
                    # EXPECTING A TUPLE
                    for local_field, es_field in dimension.fields.items():
                        local_value = v[local_field]
                        if local_value == None:
                            output.append({"missing": {"field": es_field}})
                        else:
                            output.append({"term": {es_field: local_value}})
                    continue

                if len(dimension.fields) == 1 and is_variable_name(dimension.fields[0]):
                    # SIMPLE SINGLE-VALUED FIELD
                    if domain.getPartByKey(v) is domain.NULL:
                        output.append({"missing": {"field": dimension.fields[0]}})
                    else:
                        output.append({"term": {dimension.fields[0]: v}})
                    continue

                if AND(is_variable_name(f) for f in dimension.fields):
                    # EXPECTING A TUPLE
                    if not isinstance(v, tuple):
                        Log.error("expecting {{name}}={{value}} to be a tuple", name=k, value=v)
                    for i, f in enumerate(dimension.fields):
                        vv = v[i]
                        if vv == None:
                            output.append({"missing": {"field": f}})
                        else:
                            output.append({"term": {f: vv}})
                    continue

            if len(dimension.fields) == 1 and is_variable_name(dimension.fields[0]):
                if domain.getPartByKey(v) is domain.NULL:
                    output.append({"missing": {"field": dimension.fields[0]}})
                else:
                    output.append({"term": {dimension.fields[0]: v}})
                continue

            if domain.partitions:
                part = domain.getPartByKey(v)
                if part is domain.NULL or not part.esfilter:
                    Log.error("not expected to get NULL")
                output.append(part.esfilter)
                continue
            else:
                Log.error("not expected")
        elif is_data(v):
            sub = _map_term_using_schema(master, path + [k], v, schema_edges[k])
            output.append(sub)
            continue

        output.append({"term": {k: v}})
    return {"and": output}
IF THE WHERE CLAUSE REFERS TO FIELDS IN THE SCHEMA, THEN EXPAND THEM
def PrintField(self, field, value): """Print a single field name/value pair.""" out = self.out out.write(' ' * self.indent) if self.use_field_number: out.write(str(field.number)) else: if field.is_extension: out.write('[') if (field.containing_type.GetOptions().message_set_wire_format and field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): out.write(field.message_type.full_name) else: out.write(field.full_name) out.write(']') elif field.type == descriptor.FieldDescriptor.TYPE_GROUP: # For groups, use the capitalized name. out.write(field.message_type.name) else: out.write(field.name) if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE: # The colon is optional in this case, but our cross-language golden files # don't include it. out.write(': ') self.PrintFieldValue(field, value) if self.as_one_line: out.write(' ') else: out.write('\n')
Print a single field name/value pair.
def create_doc_dict(self, document, doc_key=None, owner_document=None): """ Generate a dictionary representation of the document. (no recursion) DO NOT CALL DIRECTLY """ # Get doc field for top level documents if owner_document: doc_field = owner_document._fields.get(doc_key, None) if doc_key else None else: doc_field = document._fields.get(doc_key, None) if doc_key else None # Generate the base fields for the document doc_dict = {"_document": document if owner_document is None else owner_document, "_key": document.__class__.__name__.lower() if doc_key is None else doc_key, "_document_field": doc_field} if not isinstance(document, TopLevelDocumentMetaclass) and doc_key: doc_dict.update({"_field_type": EmbeddedDocumentField}) for key, field in document._fields.items(): doc_dict[key] = field return doc_dict
Generate a dictionary representation of the document. (no recursion) DO NOT CALL DIRECTLY
def from_ep_string(cls, ep_string, location):
    """Initialize from an EnergyPlus string of a SizingPeriod:DesignDay.

    args:
        ep_string: A full string representing a SizingPeriod:DesignDay.
    """
    # format the object into a list of properties
    ep_string = ep_string.strip()
    if '\n' in ep_string:
        ep_lines = ep_string.split('\n')
    else:
        ep_lines = ep_string.split('\r')
    lines = [l.split('!')[0].strip().replace(',', '') for l in ep_lines]

    # check to be sure that we have a valid ddy object
    assert len(lines) == 27 or len(lines) == 26, "Number " \
        "of lines of text [{}] does not correspond" \
        " to an EP Design Day [26 or 27]".format(len(lines))
    lines[-1] = lines[-1].split(';')[0]

    # extract primary properties
    name = lines[1]
    day_type = lines[4]

    # extract dry bulb temperatures
    dry_bulb_condition = DryBulbCondition(
        float(lines[5]), float(lines[6]), lines[7], lines[8])

    # extract humidity conditions
    h_type = lines[9]
    h_val = 0 if lines[10] == '' else float(lines[10])
    if h_type == 'HumidityRatio':
        h_val = float(lines[12])
    elif h_type == 'Enthalpy':
        h_val = float(lines[13])
    humidity_condition = HumidityCondition(
        h_type, h_val, float(lines[15]), lines[11])

    # extract wind conditions
    wind_condition = WindCondition(
        float(lines[16]), float(lines[17]), lines[18], lines[19])

    # extract the sky conditions
    sky_model = lines[21]
    if sky_model == 'ASHRAEClearSky':
        sky_condition = OriginalClearSkyCondition(
            int(lines[2]), int(lines[3]), float(lines[26]), lines[20])
    elif sky_model == 'ASHRAETau':
        sky_condition = RevisedClearSkyCondition(
            int(lines[2]), int(lines[3]), float(lines[24]),
            float(lines[25]), lines[20])
    else:
        sky_condition = SkyCondition(
            sky_model, int(lines[2]), int(lines[3]), lines[20])
    if sky_model == 'Schedule':
        # note: the 'shced' attribute spellings are kept as-is to match the API
        sky_condition.beam_shced = lines[22]
        sky_condition.diff_shced = lines[23]

    return cls(name, day_type, location, dry_bulb_condition,
               humidity_condition, wind_condition, sky_condition)
Initialize from an EnergyPlus string of a SizingPeriod:DesignDay.

args:
    ep_string: A full string representing a SizingPeriod:DesignDay.
def _op_to_matrix(self, op: Optional[ops.Operation], qubits: Tuple[ops.Qid, ...] ) -> Optional[np.ndarray]: """Determines the effect of an operation on the given qubits. If the operation is a 1-qubit operation on one of the given qubits, or a 2-qubit operation on both of the given qubits, and also the operation has a known matrix, then a matrix is returned. Otherwise None is returned. Args: op: The operation to understand. qubits: The qubits we care about. Order determines matrix tensor order. Returns: None, or else a matrix equivalent to the effect of the operation. """ q1, q2 = qubits matrix = protocols.unitary(op, None) if matrix is None: return None assert op is not None if op.qubits == qubits: return matrix if op.qubits == (q2, q1): return MergeInteractions._flip_kron_order(matrix) if op.qubits == (q1,): return np.kron(matrix, np.eye(2)) if op.qubits == (q2,): return np.kron(np.eye(2), matrix) return None
Determines the effect of an operation on the given qubits. If the operation is a 1-qubit operation on one of the given qubits, or a 2-qubit operation on both of the given qubits, and also the operation has a known matrix, then a matrix is returned. Otherwise None is returned. Args: op: The operation to understand. qubits: The qubits we care about. Order determines matrix tensor order. Returns: None, or else a matrix equivalent to the effect of the operation.
def delete_node(self, node_name, graph=None): """ Deletes this node and all edges referencing it. """ if not graph: graph = self.graph if node_name not in graph: raise KeyError('node %s does not exist' % node_name) graph.pop(node_name) for node, edges in six.iteritems(graph): if node_name in edges: edges.remove(node_name)
Deletes this node and all edges referencing it.
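The graph is assumed to be a mapping from node name to a list of successor names; a small sketch (`dag` is a hypothetical instance of the owning class):

graph = {'a': ['b', 'c'], 'b': ['c'], 'c': []}
dag.delete_node('c', graph=graph)
assert graph == {'a': ['b'], 'b': []}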
def get_line_pattern_rules(declarations, dirs): """ Given a list of declarations, return a list of output.Rule objects. Optionally provide an output directory for local copies of image files. """ property_map = {'line-pattern-file': 'file', 'line-pattern-width': 'width', 'line-pattern-height': 'height', 'line-pattern-type': 'type', 'line-pattern-meta-output': 'meta-output', 'line-pattern-meta-writer': 'meta-writer'} property_names = property_map.keys() # a place to put rules rules = [] for (filter, values) in filtered_property_declarations(declarations, property_names): line_pattern_file, line_pattern_type, line_pattern_width, line_pattern_height \ = values.has_key('line-pattern-file') \ and post_process_symbolizer_image_file(str(values['line-pattern-file'].value), dirs) \ or (None, None, None, None) line_pattern_width = values.has_key('line-pattern-width') and values['line-pattern-width'].value or line_pattern_width line_pattern_height = values.has_key('line-pattern-height') and values['line-pattern-height'].value or line_pattern_height symbolizer = line_pattern_file and output.LinePatternSymbolizer(line_pattern_file, line_pattern_type, line_pattern_width, line_pattern_height) if symbolizer: rules.append(make_rule(filter, symbolizer)) return rules
Given a list of declarations, return a list of output.Rule objects. Optionally provide an output directory for local copies of image files.
def __check_classes(self):
    """
    Check if any of the default classes (`Authentication`, `Configuration`
    and / or `Responses`) have been overwritten and if they're still valid
    """
    # msg taken from BaseAuthentication
    msg = (
        "Sanic JWT was not initialized properly. It did not receive "
        "an instance of {}"
    )
    if not issubclass(self.authentication_class, Authentication):
        raise exceptions.InitializationFailure(
            message=msg.format("Authentication")
        )

    if not issubclass(self.configuration_class, Configuration):
        raise exceptions.InitializationFailure(
            message=msg.format("Configuration")
        )

    if not issubclass(self.responses_class, Responses):
        raise exceptions.InitializationFailure(
            message=msg.format("Responses")
        )
Check if any of the default classes (`Authentication`, `Configuration`
and / or `Responses`) have been overwritten and if they're still valid
def read(self, size=None): """Read at most size bytes from this buffer. Bytes read from this buffer are consumed and are permanently removed. Args: size: If provided, read no more than size bytes from the buffer. Otherwise, this reads the entire buffer. Returns: The bytes read from this buffer. """ if size is None: size = self.__size ret_list = [] while size > 0 and self.__buf: data = self.__buf.popleft() size -= len(data) ret_list.append(data) if size < 0: ret_list[-1], remainder = ret_list[-1][:size], ret_list[-1][size:] self.__buf.appendleft(remainder) ret = b''.join(ret_list) self.__size -= len(ret) return ret
Read at most size bytes from this buffer. Bytes read from this buffer are consumed and are permanently removed. Args: size: If provided, read no more than size bytes from the buffer. Otherwise, this reads the entire buffer. Returns: The bytes read from this buffer.
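The negative-size slice at the end is the subtle part: after the loop overshoots, ret_list[-1][:size] with size < 0 trims exactly the overshoot and the remainder is pushed back. A standalone illustration of that trick:

from collections import deque

buf = deque([b'hello', b'world'])
size = 7                      # want 7 bytes total
size -= len(buf.popleft())    # consumed b'hello'; size == 2
data = buf.popleft()          # b'world'
size -= len(data)             # size == -3: overshot by 3 bytes
head, remainder = data[:size], data[size:]   # b'wo', b'rld'
buf.appendleft(remainder)     # b'rld' stays buffered for the next read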
def lowercase(state): """Convert all column names to their lower case versions to improve robustness :Example: Suppose we are testing the following SELECT statements * solution: ``SELECT artist_id as id FROM artists`` * student : ``SELECT artist_id as ID FROM artists`` We can write the following SCTs: :: # fails, as id and ID have different case Ex().check_column('id').has_equal_value() # passes, as lowercase() is being used Ex().lowercase().check_column('id').has_equal_value() """ return state.to_child( student_result={k.lower(): v for k, v in state.student_result.items()}, solution_result={k.lower(): v for k, v in state.solution_result.items()}, )
Convert all column names to their lower case versions to improve robustness :Example: Suppose we are testing the following SELECT statements * solution: ``SELECT artist_id as id FROM artists`` * student : ``SELECT artist_id as ID FROM artists`` We can write the following SCTs: :: # fails, as id and ID have different case Ex().check_column('id').has_equal_value() # passes, as lowercase() is being used Ex().lowercase().check_column('id').has_equal_value()
def get_contents_debug_adapter_protocol(self, lst, fmt=None): ''' This method is to be used in the case where the variables are all saved by its id (and as such don't need to have the `resolve` method called later on, so, keys don't need to embed the reference in the key). Note that the return should be ordered. :return list(tuple(name:str, value:object, evaluateName:str)) ''' l = len(lst) ret = [] format_str = '%0' + str(int(len(str(l - 1)))) + 'd' if fmt is not None and fmt.get('hex', False): format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x' for i, item in enumerate(lst): ret.append((format_str % i, item, '[%s]' % i)) if i > MAX_ITEMS_TO_HANDLE: ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None)) break ret.append(('__len__', len(lst), partial(_apply_evaluate_name, evaluate_name='len(%s)'))) # Needed in case the class extends the built-in type and has some additional fields. from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(lst, fmt=fmt) if from_default_resolver: ret = from_default_resolver + ret return ret
This method is to be used in the case where the variables are all saved by its id (and as such don't need to have the `resolve` method called later on, so, keys don't need to embed the reference in the key). Note that the return should be ordered. :return list(tuple(name:str, value:object, evaluateName:str))
def series_resistance(self, channel, resistor_index=None): ''' Parameters ---------- channel : int Analog channel index. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to set the capacitance before restoring back to the original value. See definition of :meth:`safe_series_resistor_index_read` decorator. Returns ------- float Return the current series resistance value for the specified channel. ''' if resistor_index is None: resistor_index = self.series_resistor_index(channel) value = self._series_resistance(channel) try: if channel == 0: self.calibration.R_hv[resistor_index] = value else: self.calibration.R_fb[resistor_index] = value except: pass return value
Parameters ---------- channel : int Analog channel index. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to set the capacitance before restoring back to the original value. See definition of :meth:`safe_series_resistor_index_read` decorator. Returns ------- float Return the current series resistance value for the specified channel.
def cartpole(): """Configuration for the cart pole classic control task.""" locals().update(default()) # Environment env = 'CartPole-v1' max_length = 500 steps = 2e5 # 200k normalize_ranges = False # The env reports wrong ranges. # Network network = networks.feed_forward_categorical return locals()
Configuration for the cart pole classic control task.
def _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, to_include=None, exclude=None): """Infer the outputs of a record from the original inputs """ fields = [] unlist = set([_get_string_vid(x) for x in unlist]) input_vids = set([_get_string_vid(v) for v in _handle_special_inputs(inputs, file_vs)]) to_include = set([_get_string_vid(x) for x in to_include]) if to_include else None to_exclude = tuple(set([_get_string_vid(x) for x in exclude])) if exclude else None added = set([]) for raw_v in std_vs + [v for v in file_vs if get_base_id(v["id"]) in input_vids]: # unpack record inside this record and un-nested inputs to avoid double nested cur_record = is_cwl_record(raw_v) if cur_record: # unlist = unlist | set([field["name"] for field in cur_record["fields"]]) nested_vs = [{"id": field["name"], "type": field["type"]} for field in cur_record["fields"]] else: nested_vs = [raw_v] for orig_v in nested_vs: if (get_base_id(orig_v["id"]) not in added and (not to_include or get_base_id(orig_v["id"]) in to_include)): if to_exclude is None or not get_base_id(orig_v["id"]).startswith(to_exclude): cur_v = {} cur_v["name"] = get_base_id(orig_v["id"]) cur_v["type"] = orig_v["type"] if cur_v["name"] in unlist: cur_v = _flatten_nested_input(cur_v) fields.append(_add_secondary_to_rec_field(orig_v, cur_v)) added.add(get_base_id(orig_v["id"])) return fields
Infer the outputs of a record from the original inputs
def _ows_check_charm_func(state, message, charm_func_with_configs): """Run a custom check function for the charm to see if it wants to change the state. This is only run if not in 'maintenance' and tests to see if the new state is more important that the previous one determined by the interfaces/relations check. @param state: the previously determined state so far. @param message: the user orientated message so far. @param charm_func: a callable function that returns state, message @returns state, message strings. """ if charm_func_with_configs: charm_state, charm_message = charm_func_with_configs() if (charm_state != 'active' and charm_state != 'unknown' and charm_state is not None): state = workload_state_compare(state, charm_state) if message: charm_message = charm_message.replace("Incomplete relations: ", "") message = "{}, {}".format(message, charm_message) else: message = charm_message return state, message
Run a custom check function for the charm to see if it wants to change the state.  This is only run if not in 'maintenance' and tests to see if the new state is more important than the previous one determined by the interfaces/relations check. @param state: the previously determined state so far. @param message: the user-oriented message so far. @param charm_func_with_configs: a callable function that returns state, message @returns state, message strings.
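A minimal sketch of a callable matching what this helper expects; `backend_reachable` is a hypothetical predicate, not from the source:

def my_charm_check():
    # Return a (state, message) pair; 'active', 'unknown' and None are
    # ignored by _ows_check_charm_func, anything else may override.
    if not backend_reachable():
        return 'blocked', 'backend database unreachable'
    return None, None

state, message = _ows_check_charm_func('active', '', my_charm_check)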
def S_isothermal_pipe_to_two_planes(D, Z, L=1.):
    r'''Returns the shape factor `S` of a pipe of constant outer temperature
    and of outer diameter `D` which is `Z` distance from two infinite
    isothermal planes of equal temperatures, parallel to each other and
    enclosing the pipe. Length `L` must be provided, but can be set to 1
    to obtain a dimensionless shape factor used in some sources.

    .. math::
        S = \frac{2\pi L}{\ln\frac{8Z}{\pi D}}

    Parameters
    ----------
    D : float
        Diameter of the pipe, [m]
    Z : float
        Distance from the middle of the pipe to either of the planes, [m]
    L : float, optional
        Length of the pipe, [m]

    Returns
    -------
    S : float
        Shape factor [m]

    Examples
    --------
    >>> S_isothermal_pipe_to_two_planes(.1, 5, 1)
    1.2963749299921428

    Notes
    -----
    L should be much larger than both D and Z.

    .. math::
        Q = Sk(T_1 - T_2) \\
        R_{\text{shape}}=\frac{1}{Sk}

    References
    ----------
    .. [1] Shape Factors for Heat Conduction Through Bodies with Isothermal
       or Convective Boundary Conditions, J. E. Sunderland, K. R. Johnson,
       ASHRAE Transactions, Vol. 70, 1964.
    .. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
       David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
       Wiley, 2011.
    '''
    return 2.*pi*L/log(8.*Z/(pi*D))
r'''Returns the shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D` which is `Z` distance from two infinite
isothermal planes of equal temperatures, parallel to each other and
enclosing the pipe. Length `L` must be provided, but can be set to 1
to obtain a dimensionless shape factor used in some sources.

.. math::
    S = \frac{2\pi L}{\ln\frac{8Z}{\pi D}}

Parameters
----------
D : float
    Diameter of the pipe, [m]
Z : float
    Distance from the middle of the pipe to either of the planes, [m]
L : float, optional
    Length of the pipe, [m]

Returns
-------
S : float
    Shape factor [m]

Examples
--------
>>> S_isothermal_pipe_to_two_planes(.1, 5, 1)
1.2963749299921428

Notes
-----
L should be much larger than both D and Z.

.. math::
    Q = Sk(T_1 - T_2) \\
    R_{\text{shape}}=\frac{1}{Sk}

References
----------
.. [1] Shape Factors for Heat Conduction Through Bodies with Isothermal
   or Convective Boundary Conditions, J. E. Sunderland, K. R. Johnson,
   ASHRAE Transactions, Vol. 70, 1964.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
   David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
   Wiley, 2011.
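A worked check of the doctest value above, extended with the heat-flow relation from the Notes (the conductivity k and temperatures are assumed illustrative values, not from the source):

from math import pi, log

D, Z, L = 0.1, 5.0, 1.0
S = 2.*pi*L/log(8.*Z/(pi*D))   # shape factor, m
# S = 1.2963749299921428, matching the doctest above.
k = 0.7                        # W/(m*K), assumed soil-like conductivity
T1, T2 = 350.0, 300.0          # K
Q = S*k*(T1 - T2)              # heat flow, W; Q = S*k*(T1 - T2) ~ 45.4 W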
def init_layout(self):
    """ Initialize the layout of the toolkit shape.

    This method is called during the bottom-up pass. This method
    should initialize the layout of the widget. The child widgets
    will be fully initialized and laid out when this is called.

    """
    for child in self.children():
        self.child_added(child)
    self.update_shape({})
Initialize the layout of the toolkit shape. This method is called during the bottom-up pass. This method should initialize the layout of the widget. The child widgets will be fully initialized and laid out when this is called.
def fetch_by_client_id(self, client_id): """ Retrieve a client by its identifier. :param client_id: Identifier of a client app. :return: An instance of :class:`oauth2.Client`. :raises: ClientNotFoundError """ if client_id not in self.clients: raise ClientNotFoundError return self.clients[client_id]
Retrieve a client by its identifier. :param client_id: Identifier of a client app. :return: An instance of :class:`oauth2.Client`. :raises: ClientNotFoundError
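A sketch of the expected `clients` mapping; the store class and the `Client` constructor arguments shown are illustrative and may differ from the installed oauth2 version:

store = ClientStore()   # hypothetical concrete store exposing `clients`
store.clients = {
    'abc123': Client(identifier='abc123', secret='s3cret',
                     redirect_uris=['https://app.example/cb']),
}
client = store.fetch_by_client_id('abc123')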
def fillna(self, column_name, value): """ Fill all missing values with a given value in a given column. If the ``value`` is not the same type as the values in ``column_name``, this method attempts to convert the value to the original column's type. If this fails, an error is raised. Parameters ---------- column_name : str The name of the column to modify. value : type convertible to SArray's type The value used to replace all missing values. Returns ------- out : SFrame A new SFrame with the specified value in place of missing values. See Also -------- dropna Examples -------- >>> sf = turicreate.SFrame({'a':[1, None, None], ... 'b':['13.1', '17.2', None]}) >>> sf = sf.fillna('a', 0) >>> sf +---+------+ | a | b | +---+------+ | 1 | 13.1 | | 0 | 17.2 | | 0 | None | +---+------+ [3 rows x 2 columns] """ # Normal error checking if type(column_name) is not str: raise TypeError("column_name must be a str") ret = self[self.column_names()] ret[column_name] = ret[column_name].fillna(value) return ret
Fill all missing values with a given value in a given column. If the ``value`` is not the same type as the values in ``column_name``, this method attempts to convert the value to the original column's type. If this fails, an error is raised. Parameters ---------- column_name : str The name of the column to modify. value : type convertible to SArray's type The value used to replace all missing values. Returns ------- out : SFrame A new SFrame with the specified value in place of missing values. See Also -------- dropna Examples -------- >>> sf = turicreate.SFrame({'a':[1, None, None], ... 'b':['13.1', '17.2', None]}) >>> sf = sf.fillna('a', 0) >>> sf +---+------+ | a | b | +---+------+ | 1 | 13.1 | | 0 | 17.2 | | 0 | None | +---+------+ [3 rows x 2 columns]
def audio_send_stream(self, httptype=None, channel=None,
                      path_file=None, encode=None):
    """
    Params:
        path_file - path to audio file
        channel - integer
        httptype - type string (singlepart or multipart)

            singlepart: HTTP content is a continuous flow of audio packets
            multipart: HTTP content type is multipart/x-mixed-replace, and
                       each audio packet ends with a boundary string

    Supported audio encode types according to the documentation:
        PCM ADPCM G.711A G.711.Mu G.726 G.729 MPEG2 AMR AAC
    """
    if httptype is None or channel is None:
        raise RuntimeError("Requires httptype and channel")

    header = {
        'content-type': 'Audio/' + encode,
        'content-length': '9999999'
    }

    # Use a context manager so the file handle is closed after the upload.
    with open(path_file, 'rb') as audio_file:
        self.command_audio(
            'audio.cgi?action=postAudio&httptype={0}&channel={1}'.format(
                httptype, channel),
            file_content={'file': audio_file},
            http_header=header
        )
Params: path_file - path to audio file channel - integer httptype - type string (singlepart or multipart) singlepart: HTTP content is a continuous flow of audio packets multipart: HTTP content type is multipart/x-mixed-replace, and each audio packet ends with a boundary string Supported audio encode types according to the documentation: PCM ADPCM G.711A G.711.Mu G.726 G.729 MPEG2 AMR AAC
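A hedged usage sketch; the `camera` object and file path are illustrative:

# Stream a G.711A-encoded file to audio channel 1 as one continuous
# HTTP body (singlepart).
camera.audio_send_stream(httptype='singlepart',
                         channel=1,
                         path_file='/tmp/announcement.g711a',
                         encode='G.711A')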
def AssignVar(self, value):
    """Assign a value to this Value."""
    self.value = value
    # Call OnAssignVar on options.
    for option in self.options:
        option.OnAssignVar()
Assign a value to this Value.
def dependents(self, on_predicate=None, from_predicate=None): """Returns a map from targets that satisfy the from_predicate to targets they depend on that satisfy the on_predicate. :API: public """ core = set(self.targets(on_predicate)) dependees = defaultdict(set) for target in self.targets(from_predicate): for dependency in target.dependencies: if dependency in core: dependees[target].add(dependency) return dependees
Returns a map from targets that satisfy the from_predicate to targets they depend on that satisfy the on_predicate. :API: public
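An illustrative call, assuming `build_graph` is an instance of the class defining this method; `is_test` and `is_library` stand in for whatever target predicates apply in a real build:

# Map every test target onto the library targets it depends on.
dependees = build_graph.dependents(
    on_predicate=lambda t: t.is_library,   # assumed target attribute
    from_predicate=lambda t: t.is_test,    # assumed target attribute
)
for test_target, libraries in dependees.items():
    print(test_target, '->', list(libraries))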
def __fetch_issue_attachments(self, issue_id): """Get attachments of an issue""" for attachments_raw in self.client.issue_collection(issue_id, "attachments"): attachments = json.loads(attachments_raw) for attachment in attachments['entries']: yield attachment
Get attachments of an issue
def getEvents(self): """ Returns a list of all events that have occurred. Empties the internal queue. """ caught_events = self._observer.caught_events self._observer.caught_events = [] for event in caught_events: self._observer.activate_event(event["name"]) return caught_events
Returns a list of all events that have occurred. Empties the internal queue.
def session_end(self):
    """
    End a session. See session_begin for an in-depth description
    of TREZOR sessions.
    """
    self.session_depth = max(0, self.session_depth - 1)
    if self.session_depth == 0:
        self._session_end()
End a session. See session_begin for an in-depth description of TREZOR sessions.
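Because the depth counter nests, every `session_begin` must be balanced by a `session_end`; a small context-manager sketch under that assumption (the `client` object is illustrative):

from contextlib import contextmanager

@contextmanager
def trezor_session(client):
    client.session_begin()     # increments session_depth
    try:
        yield client
    finally:
        client.session_end()   # decrements; _session_end() fires at depth 0

with trezor_session(client):
    with trezor_session(client):   # nested: the inner exit does not
        pass                       # close the underlying session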
def report_errors_to_ga(self, errors): """ Report errors to Google Analytics https://developers.google.com/analytics/devguides/collection/protocol/v1/devguide """ hits = [] responses = [] for field_name in sorted(errors): for error_message in errors[field_name]: event = self.format_ga_hit(field_name, error_message) if event: hits.append(event) if self.ga_batch_hits: for hit_batch in _batch_hits(hits): response = requests.post(self.get_ga_batch_endpoint(), data=hit_batch) responses.append(response) else: for hit in hits: response = requests.post(self.get_ga_single_endpoint(), data=hit) responses.append(response) return responses
Report errors to Google Analytics https://developers.google.com/analytics/devguides/collection/protocol/v1/devguide
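`_batch_hits` is not shown in this section; a plausible sketch, assuming the hits are already URL-encoded payload strings and using the Measurement Protocol's documented limit of 20 hits per batch request:

def _batch_hits(hits, batch_size=20):
    # Yield newline-joined payloads of at most batch_size hits each,
    # since the GA /batch endpoint expects one hit per line.
    for i in range(0, len(hits), batch_size):
        yield '\n'.join(hits[i:i + batch_size])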
def postman(host, port=587, auth=(None, None), force_tls=False, options=None):
    """
    Creates a Postman object with TLS and Auth middleware. TLS is
    placed before authentication because usually authentication
    happens and is accepted only after TLS is enabled.

    :param auth: Tuple of (username, password) to be used to ``login``
        to the server.
    :param force_tls: Whether TLS should be forced.
    :param options: Dictionary of keyword arguments to be used when
        the SMTP class is called.
    """
    return Postman(
        host=host,
        port=port,
        middlewares=[
            middleware.tls(force=force_tls),
            middleware.auth(*auth),
        ],
        **(options or {})
    )
Creates a Postman object with TLS and Auth middleware. TLS is placed before authentication because usually authentication happens and is accepted only after TLS is enabled. :param auth: Tuple of (username, password) to be used to ``login`` to the server. :param force_tls: Whether TLS should be forced. :param options: Dictionary of keyword arguments to be used when the SMTP class is called.
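A usage sketch (host and credentials are illustrative):

p = postman(host='smtp.example.com',
            auth=('mailer', 'hunter2'),
            force_tls=True,
            options={'timeout': 10})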