code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def create_release_settings_action(target, source, env):
    """Copy module_settings.json and add release and build information.

    Reads the settings JSON from ``source[0]``, marks it as a release,
    stamps the current UTC time, records the resolved version of every
    tile dependency, and writes the result to ``target[0]``.
    """
    with open(str(source[0]), "r") as fileobj:
        settings = json.load(fileobj)
    settings['release'] = True
    settings['release_date'] = datetime.datetime.utcnow().isoformat()
    settings['dependency_versions'] = {}
    # Record the exact version of each dependency that was built against.
    for dep in env['TILE'].dependencies:
        tile = IOTile(os.path.join('build', 'deps', dep['unique_id']))
        settings['dependency_versions'][dep['unique_id']] = str(tile.parsed_version)
    with open(str(target[0]), "w") as fileobj:
        json.dump(settings, fileobj, indent=4)
def make_soma(points, soma_check=None, soma_class=SOMA_CONTOUR):
    """Make a soma object from a set of points.

    Infers the soma type (e.g. SomaSinglePoint, SomaSimpleContour) from
    the points and the ``soma_class``.

    :param points: collection of points forming a soma.
    :param soma_check: optional validation function applied to points;
        should raise a SomaError if points are not valid.
    :param str soma_class: one of 'contour' or 'cylinder' specifying the type.
    :raises SomaError: if no soma points were found, the points are
        incompatible with a soma, or ``soma_check(points)`` fails.
    """
    if soma_check:
        soma_check(points)
    stype = _get_type(points, soma_class)
    if stype is None:
        raise SomaError('Invalid soma points')
    return stype(points)
def from_jsondict(cls, dict_, decode_string=base64.b64decode, **additional_args):
    r"""Create an instance from a JSON style dict.

    Instantiate this class with parameters specified by the dict.

    :param dict\_: dictionary describing the parameters,
        e.g. ``{"Param1": 100, "Param2": 200}``.
    :param decode_string: (optional) how to decode strings; base64 by
        default. Used only for attributes without explicit type
        annotations in the ``_TYPE`` class attribute.
    :param additional_args: (optional) additional kwargs for the constructor.
    """
    decode = lambda k, x: cls._decode_value(k, x, decode_string, **additional_args)
    kwargs = cls._restore_args(_mapdict_kv(decode, dict_))
    try:
        return cls(**dict(kwargs, **additional_args))
    except TypeError:
        # Deliberate debugging aid: dump the offending arguments, then
        # re-raise the original TypeError unchanged.
        print("CLS %s" % cls)
        print("ARG %s" % dict_)
        print("KWARG %s" % kwargs)
        raise
def process_args(mod_id, args, type_args):
    """Validate module arguments and fill in missing defaults.

    Takes a list of arguments defined on a module and the argument
    specification from the corresponding module type; validates the count
    and appends default values for any unsupplied trailing arguments.

    Bug fix: the original built ``res = list(args)`` but then appended to
    and returned ``args`` itself, mutating the caller's list and leaving
    ``res`` unused. Defaults are now added to the copy, which is returned.

    :param mod_id: module identifier, used only in error messages.
    :param args: list of supplied argument values.
    :param type_args: list of argument-spec dicts; each optional argument
        carries a ``"default"`` key.
    :return: new list of arguments with defaults filled in.
    :raises ValueError: if too many arguments were supplied, or too few
        and a missing argument has no default.
    """
    res = list(args)
    if len(args) > len(type_args):
        raise ValueError(
            'Too many arguments specified for module "{}" (Got {}, expected '
            '{})'.format(mod_id, len(args), len(type_args))
        )
    for i in range(len(args), len(type_args)):
        arg_info = type_args[i]
        if "default" in arg_info:
            res.append(arg_info["default"])
        else:
            raise ValueError(
                'Not enough module arguments supplied for module "{}" (Got '
                '{}, expecting {})'.format(
                    mod_id, len(args), len(type_args)
                )
            )
    return res
def get_one(self, cls=None, **kwargs):
    """Return one case object populated from keyword arguments.

    :param cls: optional class to instantiate; falls back to
        ``self._CasesClass`` when not given.
    :param kwargs: attribute name/value pairs set on the new case.
    :return: the populated case instance.
    """
    case = cls() if cls else self._CasesClass()
    # Bug fix: dict.iteritems() does not exist on Python 3; items()
    # works on both Python 2 and 3.
    for attr, value in kwargs.items():
        setattr(case, attr, value)
    return case
def open_font(self, name):
    """Open the font identified by the pattern ``name`` and return its
    font object. If ``name`` does not match any font, return None.
    """
    fid = self.display.allocate_resource_id()
    # Trap BadName instead of raising, so a missing font yields None.
    ec = error.CatchError(error.BadName)
    request.OpenFont(display = self.display, onerror = ec, fid = fid, name = name)
    self.sync()
    if ec.get_error():
        # The open failed; give the reserved resource id back.
        self.display.free_resource_id(fid)
        return None
    else:
        cls = self.display.get_resource_class('font', fontable.Font)
        return cls(self.display, fid, owner = 1)
def kube_cronjob_next_schedule_time(self, metric, scraper_config):
    """Report a service check on the time until each cronjob's next schedule.

    Emits CRITICAL when a scheduled run is already in the past (late),
    otherwise OK.
    """
    check_basename = scraper_config['namespace'] + '.cronjob.on_schedule_check'
    curr_time = int(time.time())
    for sample in metric.samples:
        # Negative means the next scheduled run time has already passed.
        on_schedule = int(sample[self.SAMPLE_VALUE]) - curr_time
        tags = [
            self._format_tag(label_name, label_value, scraper_config)
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS])
        ]
        tags += scraper_config['custom_tags']
        if on_schedule < 0:
            message = "The service check scheduled at {} is {} seconds late".format(
                time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(int(sample[self.SAMPLE_VALUE]))), on_schedule
            )
            self.service_check(check_basename, self.CRITICAL, tags=tags, message=message)
        else:
            self.service_check(check_basename, self.OK, tags=tags)
def _decode_var(cls, string):
    """Decode a given string into the appropriate Python type.

    Order of the branches matters: quoted strings first, then plain
    digits, booleans, signed integers, floats, and finally the raw string.

    :param str string: The string to decode
    :return: The decoded value (str, int, bool, or float)
    """
    str_match = cls.quoted_string_regex.match(string)
    if str_match:
        # Quoted string: strip only the quote character that was matched.
        return string.strip("'" if str_match.groups()[0] else '"')
    elif string.isdigit() and cls.is_digit_regex.match(string) is not None:
        return int(string)
    elif string.lower() in ("true", "false"):
        return string.lower() == "true"
    elif string.lstrip("-").isdigit():
        # Possibly a negative integer; fall back to the raw string.
        try:
            return int(string)
        except ValueError:
            return string
    elif "." in string.lstrip("-"):
        try:
            return float(string)
        except ValueError:
            return string
    else:
        return string
def get_installed_version(dist_name, working_set=None):
    """Get the installed version of dist_name avoiding pkg_resources cache.

    :param dist_name: distribution name/requirement string to look up.
    :param working_set: optional pkg_resources.WorkingSet to search in.
    :return: version string, or None when the distribution is not found.
    """
    req = pkg_resources.Requirement.parse(dist_name)
    if working_set is None:
        # A fresh WorkingSet avoids the stale module-level cache.
        working_set = pkg_resources.WorkingSet()
    dist = working_set.find(req)
    return dist.version if dist else None
def is44(msg):
    """Check if a message is likely to be BDS code 4,4
    (meteorological routine air report).

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        bool: True or False
    """
    if allzeros(msg):
        return False
    d = hex2bin(data(msg))
    # Status-bit consistency checks over the report's data fields.
    if wrongstatus(d, 5, 6, 23):
        return False
    if wrongstatus(d, 35, 36, 46):
        return False
    if wrongstatus(d, 47, 48, 49):
        return False
    if wrongstatus(d, 50, 51, 56):
        return False
    # Plausibility limits (source type, wind speed, temperature range).
    if bin2int(d[0:4]) > 4:
        return False
    vw = wind44(msg)
    if vw is not None and vw[0] > 250:
        return False
    temp, temp2 = temp44(msg)
    if min(temp, temp2) > 60 or max(temp, temp2) < -80:
        return False
    return True
def withHeartbeater(cls, heartbeater):
    """Connect a SockJSProtocolMachine to its heartbeater.

    Builds a machine around ``heartbeater`` and wires the heartbeater's
    ``writeHeartbeat`` hook to the machine's ``heartbeat`` method.
    """
    machine = cls(heartbeater)
    heartbeater.writeHeartbeat = machine.heartbeat
    return machine
def get(self, tags=None, trigger_ids=None):
    """Get triggers with optional filtering.

    Querying without parameters returns all the trigger definitions.

    Bug fix: the mutable default arguments ``tags=[]`` / ``trigger_ids=[]``
    are shared between all calls; replaced with ``None`` sentinels.
    Passing a list still behaves exactly as before.

    :param tags: Fetch triggers with matching tags only. Use * to match
        all values.
    :param trigger_ids: List of triggerIds to fetch.
    """
    params = {}
    # An empty/None collection means "no filter", same as len(...) > 0 before.
    if tags:
        params['tags'] = ','.join(tags)
    if trigger_ids:
        params['triggerIds'] = ','.join(trigger_ids)
    url = self._service_url('triggers', params=params)
    triggers_dict = self._get(url)
    return Trigger.list_to_object_list(triggers_dict)
def remove_instance(self, instance):
    """Request to cleanly remove the given instance.

    If the instance is external, also shut its process down cleanly first.

    :param instance: instance to remove
    :type instance: object
    :return: None
    """
    if instance.is_external:
        logger.info("Request external process to stop for %s", instance.name)
        instance.stop_process()
        logger.info("External process stopped.")
    # Drain queues before dropping the instance from the managed list.
    instance.clear_queues(self.daemon.sync_manager)
    self.instances.remove(instance)
def update_result_ctrl(self, event):
    """Update the result control following execution by the main window.

    Displays ``event.msg`` (stdout) and ``event.err`` (stderr, in red);
    closes the dialog when OK was pressed and there was no error.
    """
    # The wx widget may already have been destroyed.
    if not self:
        return
    printLen = 0
    self.result_ctrl.SetValue('')
    if hasattr(event, 'msg'):
        self.result_ctrl.AppendText(event.msg)
        printLen = len(event.msg)
    if hasattr(event, 'err'):
        errLen = len(event.err)
        errStyle = wx.TextAttr(wx.RED)
        self.result_ctrl.AppendText(event.err)
        # Style only the error portion, appended after the message.
        self.result_ctrl.SetStyle(printLen, printLen+errLen, errStyle)
    if not hasattr(event, 'err') or event.err == '':
        if self._ok_pressed:
            self.Destroy()
        self._ok_pressed = False
def to_dataframe(self, fieldnames=(), verbose=True, index=None,
                 coerce_float=False, datetime_index=False):
    """Return a DataFrame from the queryset.

    Parameters
    ----------
    fieldnames: model field names (columns) to use in creating the
        DataFrame. Relationships can be spanned the usual Django ORM way
        with double underscores.
    verbose: if True, populate the DataFrame with the human readable
        versions for foreign key fields, else use the raw values.
    index: field to use for the index; appended to fieldnames if missing.
        Mandatory for timeseries.
    coerce_float: attempt to convert non-string, non-numeric objects
        (like decimal.Decimal) to floating point.
    datetime_index: whether the index should be converted to a
        DateTimeIndex.
    """
    return read_frame(self, fieldnames=fieldnames, verbose=verbose,
                      index_col=index, coerce_float=coerce_float,
                      datetime_index=datetime_index)
def _new_song(self): s = self.song if self.shuffle: self.song = self.shuffles[random.randrange(len(self.shuffles))] else: self.song += 1 if self.song >= len(self.loop): self.song = 0 self.dif_song = s != self.song self.pos = 0
Used internally to get a metasong index.
def get_optional(self, key: str, default: Optional[Any] = None) -> Optional[Any]:
    """Fetch the value stored under ``key``, falling back to ``default``.

    .. deprecated:: use ``session.state.get()`` instead.
    """
    state = self.state
    return state.get(key, default)
def border(self, L):
    """Append to L the border of the subtree.

    NOTE(review): nodes whose shape equals ``L_shape`` are treated as
    leaves whose value belongs to the border — confirm against the
    definition of ``L_shape``.
    """
    if self.shape == L_shape:
        L.append(self.value)
    else:
        # Recurse into children, preserving left-to-right order.
        for x in self.sons:
            x.border(L)
def stream_request_body(cls: Type[RequestHandler]) -> Type[RequestHandler]:
    """Apply to `RequestHandler` subclasses to enable streaming body support.

    This decorator implies the following changes:

    * `.HTTPServerRequest.body` is undefined, and body arguments will not
      be included in `RequestHandler.get_argument`.
    * `RequestHandler.prepare` is called when the request headers have
      been read instead of after the entire body has been read.
    * The subclass must define a method ``data_received(self, data):``,
      which will be called zero or more times as data is available. Note
      that if the request has an empty body, ``data_received`` may not
      be called.
    * ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``), in which case the next method will not be
      called until those futures have completed.
    * The regular HTTP method (``post``, ``put``, etc) will be called
      after the entire body has been read.

    :raises TypeError: if ``cls`` is not a RequestHandler subclass.
    """
    if not issubclass(cls, RequestHandler):
        # Bug fix: the message was passed as a second positional exception
        # argument ("...%r", cls) instead of being %-formatted into it.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    cls._stream_request_body = True
    return cls
def is_module_reloadable(self, module, modname):
    """Decide if a module is reloadable or not."""
    if self.has_cython:
        # Cython-compiled modules cannot be safely reloaded.
        return False
    excluded = (self.is_module_in_pathlist(module)
                or self.is_module_in_namelist(modname))
    return not excluded
def add_node_set_configuration(self, param_name, node_to_value):
    """Set a parameter for a set of nodes.

    :param param_name: parameter identifier (as specified by the chosen model)
    :param node_to_value: dictionary mapping each node to a parameter value
    """
    for nid, val in future.utils.iteritems(node_to_value):
        self.add_node_configuration(param_name, nid, val)
def pre_process_data(filepath):
    """Load labeled review texts from ``pos``/``neg`` subdirectories.

    This is dependent on your training data source, but we try to
    generalize it as best as possible.

    :param filepath: directory containing ``pos`` and ``neg`` subfolders
        of ``.txt`` files.
    :return: shuffled list of ``(label, text)`` tuples with label 1 for
        positive and 0 for negative samples.
    """
    labeled_dirs = (
        (1, os.path.join(filepath, 'pos')),
        (0, os.path.join(filepath, 'neg')),
    )
    dataset = []
    for label, dirpath in labeled_dirs:
        for filename in glob.glob(os.path.join(dirpath, '*.txt')):
            with open(filename, 'r') as f:
                dataset.append((label, f.read()))
    shuffle(dataset)
    return dataset
def models_get(self, resource_url):
    """Get handle for model resource at given Url.

    Parameters
    ----------
    resource_url : string
        Url for model resource at SCO-API

    Returns
    -------
    models.ModelHandle
        Handle for local copy of the resource
    """
    obj_dir, obj_json, is_active, cache_id = self.get_object(resource_url)
    model = ModelHandle(obj_json)
    # PEP 8 idiom fix: `x not in y` instead of `not x in y`.
    if cache_id not in self.cache:
        self.cache_add(resource_url, cache_id)
    return model
def enter_config_value(self, key, default=""):
    """Prompt the user for a configuration value.

    :param key: name of the setting shown in the prompt.
    :param default: value returned when the user enters nothing.
    """
    prompt = 'Please enter a value for ' + key + ': '
    value = input(prompt)
    return value if value else default
def _qstr(self, question):
    """Render a question as a readable string.

    We need to cope with a list, or a list of lists; nested lists are
    rendered recursively.
    """
    parts = []
    for entry in question:
        if type(entry) is list:
            parts.append(self._qstr(entry))
        else:
            # Format as "candidate title"<id>.
            parts.append('"%s"<%d>' % (self._count_data.get_candidate_title(entry), entry))
    return ', '.join(parts)
def interpoled_resampling(W, x):
    """Resampling based on an interpolated CDF, as described in Malik and Pitt.

    Parameters
    ----------
    W: (N,) array
        weights
    x: (N,) array
        particles

    Returns
    -------
    xrs: (N,) array
        the resampled particles
    """
    N = W.shape[0]
    # Sort particles and carry their weights along.
    idx = np.argsort(x)
    xs = x[idx]
    ws = W[idx]
    cs = np.cumsum(avg_n_nplusone(ws))
    u = random.rand(N)
    xrs = np.empty(N)
    where = np.searchsorted(cs, u)
    # Invert the interpolated CDF for each uniform draw, clamping at the ends.
    for n in range(N):
        m = where[n]
        if m==0:
            xrs[n] = xs[0]
        elif m==N:
            xrs[n] = xs[-1]
        else:
            xrs[n] = interpol(cs[m-1], cs[m], xs[m-1], xs[m], u[n])
    return xrs
def interaction_method(self, kind, x):
    """Return interaction-method info for ``kind``, unmarshalled via ``x``.

    Checks whether the error is an InteractionRequired error that
    implements the method with the given name, and JSON-unmarshals the
    method-specific data by calling ``x.from_dict``.

    :param kind: the interaction method kind (string).
    :param x: a class with a class method ``from_dict`` returning a new
        instance of the interaction info for the given kind.
    :return: the result of ``x.from_dict``.
    :raises InteractionError: if this is not an interaction-required error.
    :raises InteractionMethodNotFound: if the method kind is absent.
    """
    if self.info is None or self.code != ERR_INTERACTION_REQUIRED:
        raise InteractionError(
            'not an interaction-required error (code {})'.format(
                self.code)
        )
    entry = self.info.interaction_methods.get(kind)
    if entry is None:
        raise InteractionMethodNotFound(
            'interaction method {} not found'.format(kind)
        )
    return x.from_dict(entry)
def convert_unit(value, unit, to):
    """Convert `value`, assumed to be in units of `unit`, to units of `to`.

    Sub-weekly units and month-based units form two separate families;
    converting between families raises because months/quarters/years have
    variable length.

    Examples
    --------
    >>> convert_unit(1000, 'ms', 's')
    1
    >>> convert_unit(1, 's', 'ms')
    1000
    >>> convert_unit(1, 's', 's')
    1
    >>> convert_unit(1, 's', 'M')
    Traceback (most recent call last):
        ...
    ValueError: Cannot convert to or from variable length interval
    """
    if unit == to:
        return value

    weekly_units = ('W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns')
    weekly_factors = (7, 24, 60, 60, 1000, 1000, 1000)
    monthly_units = ('Y', 'Q', 'M')
    monthly_factors = (4, 3)

    try:
        i, j = weekly_units.index(unit), weekly_units.index(to)
        factors = weekly_factors
    except ValueError:
        try:
            i, j = monthly_units.index(unit), monthly_units.index(to)
            factors = monthly_factors
        except ValueError:
            raise ValueError(
                'Cannot convert to or from variable length interval'
            )

    factor = 1
    for step in factors[min(i, j):max(i, j)]:
        factor *= step
    assert factor > 1

    if i < j:
        return value * factor
    assert i > j
    return value // factor
def get_commands_aliases_and_macros_for_completion(self) -> List[str]:
    """Return a list of visible commands, aliases, and macros for tab completion."""
    names = set(self.get_visible_commands())
    names.update(self.get_alias_names())
    names.update(self.get_macro_names())
    return list(names)
def endless_permutations(N, random_state=None):
    """Generate an endless sequence of random integers from permutations
    of the set [0, ..., N).

    If we call this N times, we will sweep through the entire set without
    replacement; on the (N+1)th call a new permutation is created, etc.

    Parameters
    ----------
    N: int
        the length of the set
    random_state: int or RandomState, optional
        random seed

    Yields
    ------
    int:
        a random int from the set [0, ..., N)
    """
    generator = check_random_state(random_state)
    while True:
        batch_inds = generator.permutation(N)
        for b in batch_inds:
            yield b
def binormal_curve_single_list(obj, param_list, normalize):
    """Evaluate the curve binormal vectors at the given parameter values.

    :param obj: input curve
    :type obj: abstract.Curve
    :param param_list: parameter list
    :type param_list: list or tuple
    :param normalize: if True, the returned vector is converted to a unit vector
    :type normalize: bool
    :return: a list containing "point" and "vector" pairs
    :rtype: tuple
    """
    return tuple(
        binormal_curve_single(obj, param, normalize)
        for param in param_list
    )
def get_rss_feed_content(url, offset=0, limit=None, exclude_items_in=None):
    """Get the entries from an RSS feed.

    :param url: feed URL to fetch.
    :param offset: number of leading entries to skip.
    :param limit: maximum number of entries to return (None = no limit).
    :param exclude_items_in: iterable of items whose 'guid' values should
        be excluded from the result.
    :return: list of feed entries with an added 'updated_datetime' field,
        or False when the feed could not be fetched/parsed.
    """
    end = limit + offset if limit is not None else None
    response = _get(url)
    try:
        feed_data = feedparser.parse(response.text)
        if not feed_data.feed:
            logger.warning('No valid feed data found at {}'.format(url))
            return False
        content = feed_data.entries
    except Exception as parse_error:
        logger.warning(
            'Failed to parse feed from {}: {}'.format(url, str(parse_error))
        )
        return False
    if exclude_items_in:
        exclude_ids = [item['guid'] for item in exclude_items_in]
        content = [item for item in content if item['guid'] not in exclude_ids]
    content = content[offset:end]
    for item in content:
        # Convert the parsed struct_time into a datetime for convenience.
        updated_time = time.mktime(item['updated_parsed'])
        item['updated_datetime'] = datetime.fromtimestamp(updated_time)
    return content
def align_add(tree, key, item, align_thres=2.0):
    """Add ``item`` to the binary tree under ``key``, allowing for small
    key differences.

    If an existing entry's key is within ``align_thres`` of ``key`` the
    item is appended to that entry's list; otherwise a new list is
    created under ``key``.
    """
    for near_key, near_list in get_near_items(tree, key):
        if abs(key - near_key) < align_thres:
            near_list.append(item)
            # Merged into the first sufficiently close entry; done.
            return
    tree[key] = [item]
def get_sql_insert(table: str,
                   fieldlist: Sequence[str],
                   delims: Tuple[str, str] = ("", "")) -> str:
    """Returns ?-marked SQL for an INSERT statement.

    :param table: table name (delimited with ``delims``).
    :param fieldlist: column names (each delimited with ``delims``).
    :param delims: (prefix, suffix) identifier delimiters.
    """
    columns = ",".join(delimit(x, delims) for x in fieldlist)
    placeholders = ",".join(["?"] * len(fieldlist))
    return (
        "INSERT INTO " + delimit(table, delims) +
        " (" + columns + ") VALUES (" + placeholders + ")"
    )
def register_resource(mod, view, **kwargs):
    """Register the resource on the derived resource name or a custom url.

    :param mod: Flask blueprint/app-like object exposing ``add_url_rule``.
    :param view: class-based view exposing ``as_view``.
    :param kwargs: optional ``endpoint`` and ``url`` overrides.
    """
    # Strips the last 8 characters of the view class name — presumably a
    # "Resource" suffix; TODO confirm against the view naming convention.
    resource_name = view.__name__.lower()[:-8]
    endpoint = kwargs.get('endpoint', "{}_api".format(resource_name))
    plural_resource_name = inflect.engine().plural(resource_name)
    path = kwargs.get('url', plural_resource_name).strip('/')
    url = '/{}'.format(path)
    setattr(view, '_url', url)
    view_func = view.as_view(endpoint)
    # Collection routes plus per-object routes keyed by obj_id.
    mod.add_url_rule(url, view_func=view_func, methods=['GET', 'POST', 'OPTIONS'])
    mod.add_url_rule('{}/<obj_id>'.format(url), view_func=view_func,
                     methods=['GET', 'PATCH', 'PUT', 'DELETE', 'OPTIONS'])
def setImgShape(self, shape):
    """Record the image shape without loading an image.

    The image shape must be known for calculating the camera matrix when
    method==Manual and addPoints is used instead of addImg; call this
    before .coeffs are obtained.
    """
    placeholder = type('Dummy', (object,), {})
    placeholder.shape = shape
    self.img = placeholder
def getch(self):
    """Get a single character and advance the scan pointer.

    >>> s = Scanner("abc")
    >>> s.getch()
    'a'
    >>> s.getch()
    'b'
    >>> s.getch()
    'c'
    >>> s.pos
    3
    """
    start = self.pos
    self.pos = start + 1
    # Slicing (rather than indexing) returns '' instead of raising at EOF.
    return self.string[start:start + 1]
def create_month_selectbox(name, selected_month=0, ln=None):
    """Create an HTML menu for month selection. Value of selected field
    is numeric.

    @param name: name of the control; the form will be sent with name=value
    @param selected_month: preselect a month; use 0 for the label 'Month'
    @param ln: language of the menu
    @return: html as string
    """
    ln = default_ln(ln)
    out = "<select name=\"%s\">\n" % name
    # Index 0 is the 'Month' label entry, 1..12 are the months.
    for i in range(0, 13):
        out += "<option value=\"%i\"" % i
        if (i == selected_month):
            out += " selected=\"selected\""
        out += ">%s</option>\n" % get_i18n_month_name(i, ln)
    out += "</select>\n"
    return out
def build_truncated_gr_mfd(mfd):
    """Parse the truncated Gutenberg-Richter MFD as a Node.

    :param mfd: MFD as instance of :class:
        `openquake.hazardlib.mfd.truncated_gr.TruncatedGRMFD`
    :returns: instance of :class:`openquake.baselib.node.Node`
    """
    return Node("truncGutenbergRichterMFD",
                {"aValue": mfd.a_val, "bValue": mfd.b_val,
                 "minMag": mfd.min_mag, "maxMag": mfd.max_mag})
def _reads_in_peaks(bam_file, peaks_file, sample):
    """Calculate number of reads in peaks (RiP metric).

    Returns an empty dict when no peaks file is available.
    """
    if not peaks_file:
        return {}
    rip = number_of_mapped_reads(sample, bam_file, bed_file = peaks_file)
    return {"metrics": {"RiP": rip}}
def create_terms_index(es, index_name: str):
    """Create the Elasticsearch terms index from the YAML mappings file.

    Failures are logged rather than raised.
    """
    with open(mappings_terms_fn, "r") as f:
        # SafeLoader avoids executing arbitrary YAML tags.
        mappings_terms = yaml.load(f, Loader=yaml.SafeLoader)
    try:
        es.indices.create(index=index_name, body=mappings_terms)
    except Exception as e:
        log.error(f"Could not create elasticsearch terms index: {e}")
def _pycall_path_simple(
    x1: int, y1: int, x2: int, y2: int, handle: Any
) -> float:
    """Does less and should run faster, just calls the handle function.

    ``handle`` is a CFFI handle wrapping the user's cost callback.
    """
    return ffi.from_handle(handle)(x1, y1, x2, y2)
def migrate_secret_key(old_key):
    """Call entry points exposed for the SECRET_KEY change.

    :param old_key: the previous secret key, passed to every
        ``invenio_base.secret_key`` entry point.
    :raises click.ClickException: when SECRET_KEY is not configured.
    """
    if 'SECRET_KEY' not in current_app.config or \
            current_app.config['SECRET_KEY'] is None:
        raise click.ClickException(
            'SECRET_KEY is not set in the configuration.')
    for ep in iter_entry_points('invenio_base.secret_key'):
        try:
            ep.load()(old_key=old_key)
        except Exception:
            # Log which entry point failed, then propagate the error.
            current_app.logger.error(
                'Failed to initialize entry point: {0}'.format(ep))
            raise
    click.secho('Successfully changed secret key.', fg='green')
def process(self, model=None, context=None):
    """Perform filtering and validation in one step.

    :param model: object or dict
    :param context: object, dict or None
    :return: shiftschema.result.Result
    """
    self.filter(model, context)
    result = self.validate(model, context)
    return result
def newer(self, source, target):
    """Tell if the target is newer than the source.

    Returns true if 'source' exists and is more recently modified than
    'target', or if 'source' exists and 'target' doesn't. Returns false
    if both exist and 'target' is the same age or younger than 'source'.

    Raises DistlibException if 'source' does not exist.

    Note that this test is not very accurate: files created in the same
    second will have the same "age".
    """
    if not os.path.exists(source):
        # Bug fix: %r already wraps the path in quotes, so the literal
        # quotes around it produced doubled quoting like "file ''x''".
        raise DistlibException("file %r does not exist" %
                               os.path.abspath(source))
    if not os.path.exists(target):
        return True
    return os.stat(source).st_mtime > os.stat(target).st_mtime
def add_menu(self, name, link=None):
    """Adds a menu entry; creates the enclosing menu if it doesn't exist yet.

    NOTE(review): the branch structure was reconstructed from collapsed
    source — the pairing of the separator write vs. the ``<ul>`` opener
    should be confirmed against the original file.
    """
    if self.menu_began:
        # Subsequent entries: optionally emit a separator between items.
        if self.menu_separator_tag:
            self.write(self.menu_separator_tag)
    else:
        # First entry: open the menu list once.
        self.write('<ul class="horizontal">')
        self.menu_began = True
    self.write('<li>')
    if link:
        self.write('<a href="{}">', self._rel(link))
    self.write(name)
    if link:
        self.write('</a>')
    self.write('</li>')
def end_stream(self, stream_id):
    """Close the given stream locally.

    This may block until the underlying transport becomes writable, or
    other coroutines release the wlock on this stream.

    :param stream_id: Which stream to close.
    """
    # Serialize writes on this stream, and wait until flow control allows
    # sending before ending it.
    with (yield from self._get_stream(stream_id).wlock):
        yield from self._resumed.wait()
        self._conn.end_stream(stream_id)
        self._flush()
def remove(self, session_id):
    """Remove a session object from the container.

    :param session_id: session identifier.
    :return: True when a session was found and removed, False otherwise.
    """
    session = self._items.get(session_id)
    if session is None:
        return False
    session.promoted = -1
    session.on_delete(True)
    del self._items[session_id]
    return True
def table(schema, name=None):
    """Create an unbound Ibis table for creating expressions.

    Cannot be executed without being bound to some physical table.
    Useful for testing.

    Parameters
    ----------
    schema : ibis Schema (or dict / list of tuples coercible to one)
    name : string, default None
        Name for table

    Returns
    -------
    table : TableExpr
    """
    if not isinstance(schema, Schema):
        # Coerce plain dicts or tuple lists into a Schema.
        if isinstance(schema, dict):
            schema = Schema.from_dict(schema)
        else:
            schema = Schema.from_tuples(schema)
    node = ops.UnboundTable(schema, name=name)
    return node.to_expr()
def entity_types(self):
    """List entity types in the workspace.

    :return: view of entity type names from the API response.
    """
    r = fapi.get_entity_types(self.namespace, self.name, self.api_url)
    fapi._check_response_code(r, 200)
    return r.json().keys()
def _sysv_enable(name):
    """Enable the named sysv service to start at boot.

    The service will be enabled using chkconfig with default run-levels
    if the service is chkconfig compatible. If chkconfig is not
    available, then this will fail.
    """
    if not _service_is_chkconfig(name) and not _chkconfig_add(name):
        return False
    cmd = '/sbin/chkconfig {0} on'.format(name)
    # retcode 0 means success, hence the negation.
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
def _init_qualifier_decl(qualifier_decl, qual_repo): assert qualifier_decl.name not in qual_repo if qualifier_decl.tosubclass is None: qualifier_decl.tosubclass = True if qualifier_decl.overridable is None: qualifier_decl.overridable = True if qualifier_decl.translatable is None: qualifier_decl.translatable = False
Initialize the flavors of a qualifier declaration if they are not already set.
def use_gradient(grad_f):
    """Decorator for easily setting custom gradients for TensorFlow functions.

    * DO NOT use this function if you need to serialize your graph.
    * This function will cause the decorated function to run slower.

    Example:
        def _foo_grad(op, grad): ...

        @use_gradient(_foo_grad)
        def foo(x1, x2, x3): ...

    Args:
        grad_f: function to use as gradient.

    Returns:
        A decorator to apply to the function you wish to override the
        gradient of.
    """
    grad_f_name = register_to_random_name(grad_f)

    def function_wrapper(f):
        def inner(*inputs):
            # Captures the forward value so the mock op can replay it.
            state = {"out_value": None}
            out = f(*inputs)

            def store_out(out_value):
                state["out_value"] = out_value
            store_name = "store_" + f.__name__
            store = tf.py_func(store_out, [out], (), stateful=True, name=store_name)

            def mock_f(*inputs):
                return state["out_value"]
            # After storing, replay the value through a PyFunc whose
            # gradient is overridden with grad_f.
            with tf.control_dependencies([store]):
                with gradient_override_map({"PyFunc": grad_f_name}):
                    mock_name = "mock_" + f.__name__
                    mock_out = tf.py_func(mock_f, inputs, out.dtype,
                                          stateful=True, name=mock_name)
            mock_out.set_shape(out.get_shape())
            return mock_out
        return inner
    return function_wrapper
def cancel(self):
    """Cancel the current statement on this cursor's connection."""
    conn = self._assert_open()
    conn._try_activate_cursor(self)
    self._session.cancel_if_pending()
def get_explanation_dict(self, entry):
    """Provide an explanation dict of the corrections that are being
    applied for a given compatibility scheme. Inspired by the "explain"
    methods in many database methodologies.

    Args:
        entry: A ComputedEntry.

    Returns:
        dict with the compatibility name, uncorrected and corrected
        energies, and a list of per-correction name/description/value
        entries.
    """
    centry = self.process_entry(entry)
    if centry is None:
        # Entry was rejected by the scheme; no corrected energy exists.
        uncorrected_energy = entry.uncorrected_energy
        corrected_energy = None
    else:
        uncorrected_energy = centry.uncorrected_energy
        corrected_energy = centry.energy
    d = {"compatibility": self.__class__.__name__,
         "uncorrected_energy": uncorrected_energy,
         "corrected_energy": corrected_energy}
    corrections = []
    corr_dict = self.get_corrections_dict(entry)
    for c in self.corrections:
        # Description is the correction docstring up to its Args section.
        cd = {"name": str(c),
              "description": c.__doc__.split("Args")[0].strip(),
              "value": corr_dict.get(str(c), 0)}
        corrections.append(cd)
    d["corrections"] = corrections
    return d
def cp(source, bucket, checksum, key_prefix):
    """Create new bucket objects from all files in a directory.

    :param source: directory to read files from.
    :param bucket: identifier of the destination bucket.
    :param checksum: whether to compute checksums while populating.
    :param key_prefix: prefix prepended to each object key.
    """
    from .models import Bucket
    from .helpers import populate_from_path
    for object_version in populate_from_path(
            Bucket.get(bucket), source, checksum=checksum,
            key_prefix=key_prefix):
        click.secho(str(object_version))
    # Persist all created object versions in one transaction.
    db.session.commit()
def keypoint_vflip(kp, rows, cols):
    """Flip a keypoint vertically around the x-axis.

    :param kp: (x, y, angle, scale) keypoint.
    :param rows: image height, used to mirror the y coordinate.
    :param cols: image width (unused, kept for a symmetric interface).
    :return: [x, y, angle, scale] of the flipped keypoint.
    """
    x, y, angle, scale = kp
    # Negating the sine mirrors the angle across the x-axis.
    mirrored_angle = math.atan2(-math.sin(angle), math.cos(angle))
    mirrored_y = (rows - 1) - y
    return [x, mirrored_y, mirrored_angle, scale]
async def file(location, mime_type=None, headers=None, _range=None):
    """Return a response object with file data.

    :param location: Location of file on system.
    :param mime_type: Specific mime_type.
    :param headers: Custom Headers.
    :param _range: optional object with start/end/size/total attributes
        for partial-content responses.
    """
    # Bug fix: with the default headers=None and a _range given,
    # headers['Content-Range'] raised TypeError. Use a dict instead.
    headers = headers or {}
    filename = path.split(location)[-1]
    async with open_async(location, mode='rb') as _file:
        if _range:
            await _file.seek(_range.start)
            out_stream = await _file.read(_range.size)
            headers['Content-Range'] = 'bytes %s-%s/%s' % (
                _range.start, _range.end, _range.total)
        else:
            out_stream = await _file.read()
    mime_type = mime_type or guess_type(filename)[0] or 'text/plain'
    return HTTPResponse(status=200,
                        headers=headers,
                        content_type=mime_type,
                        body_bytes=out_stream)
def make(directory):
    """Makes a RAS Machine directory.

    Creates the directory, a ``jsons`` subfolder, and copies the default
    config into it. Exits if the path already exists.
    """
    if os.path.exists(directory):
        if os.path.isdir(directory):
            click.echo('Directory already exists')
        else:
            click.echo('Path exists and is not a directory')
        # NOTE(review): exit placement reconstructed from collapsed source;
        # exiting in both "already exists" cases matches the messages.
        sys.exit()
    os.makedirs(directory)
    os.mkdir(os.path.join(directory, 'jsons'))
    copy_default_config(os.path.join(directory, 'config.yaml'))
def deriv2(self, p):
    """Second derivative of the C-Log-Log link function.

    Parameters
    ----------
    p : array-like
        Mean parameters

    Returns
    -------
    g''(p) : array
        The second derivative of the CLogLog link function
    """
    # Clip/clean p away from 0 and 1 so log(1 - p) is finite.
    p = self._clean(p)
    fl = np.log(1 - p)
    d2 = -1 / ((1 - p)**2 * fl)
    d2 *= 1 + 1 / fl
    return d2
def update_model_cache(table_name):
    """Update the model cache by generating a new key for the model.

    :param table_name: table whose cached entries should be invalidated.
    """
    # A fresh UUID key invalidates all previously cached queries.
    model_cache_info = ModelCacheInfo(table_name, uuid.uuid4().hex)
    model_cache_backend.share_model_cache_info(model_cache_info)
def from_event(cls, event):
    """Create a message from an event.

    :param event: the event that was received of type ``pubmsg`` or
        ``privmsg``
    :type event: :class:`Event3`
    :returns: a message that resembles the event
    :rtype: :class:`Message3`
    :raises: None
    """
    source = Chatter(event.source)
    return cls(source, event.target, event.arguments[0], event.tags)
def handle_annotation_pattern(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
    """Handle statements like ``DEFINE ANNOTATION X AS PATTERN "Y"``.

    Compiles the pattern and registers it under the annotation name.

    :raises: RedefinedAnnotationError
    """
    annotation = tokens['name']
    self.raise_for_redefined_annotation(line, position, annotation)
    self.annotation_to_pattern[annotation] = re.compile(tokens['value'])
    return tokens
def read_hdf5_dict(h5f, names=None, path=None, on_missing='error', **kwargs):
    """Read a `DataQualityDict` from an HDF5 file.

    :param h5f: open HDF5 file or group.
    :param names: flag names to read; auto-discovered when omitted.
    :param path: optional group path within the file.
    :param on_missing: 'error' (default), 'warn', or 'ignore' for flags
        not found in the file.
    """
    if path:
        h5f = h5f[path]
    if names is None:
        # Backwards-compatible alias for the names keyword.
        names = kwargs.pop('flags', None)
    if names is None:
        try:
            names = find_flag_groups(h5f, strict=True)
        except KeyError:
            names = None
    if not names:
        raise ValueError("Failed to automatically parse available flag "
                         "names from HDF5, please give a list of names "
                         "to read via the ``names=`` keyword")
    out = DataQualityDict()
    for name in names:
        try:
            out[name] = read_hdf5_flag(h5f, name, **kwargs)
        except KeyError as exc:
            if on_missing == 'ignore':
                pass
            elif on_missing == 'warn':
                warnings.warn(str(exc))
            else:
                raise ValueError('no H5Group found for flag '
                                 '{0!r}'.format(name))
    return out
def leb128_encode(value):
    """Encode a non-negative integer using unsigned LEB128.

    :param int value: The value to encode.
    :return: The LEB128-encoded integer.
    :rtype: bytes
    """
    if value == 0:
        return b'\x00'
    encoded = bytearray()
    remaining = value
    while remaining:
        low7 = remaining & 0x7f
        remaining >>= 7
        # Set the continuation bit on every byte except the last one.
        encoded.append(low7 | 0x80 if remaining else low7)
    return bytes(encoded)
def find_python():
    """Search for a Python executable automatically.

    Resolution order: cached state, the PYBLISH_QML_PYTHON_EXECUTABLE
    environment variable (first existing path), then ``python`` /
    ``python3`` on PATH.

    :raises ValueError: when no usable executable is found.
    """
    python = (
        _state.get("pythonExecutable") or
        next((
            exe for exe in
            os.getenv("PYBLISH_QML_PYTHON_EXECUTABLE", "").split(os.pathsep)
            if os.path.isfile(exe)), None
        ) or
        which("python") or
        which("python3")
    )
    if not python or not os.path.isfile(python):
        raise ValueError("Could not locate Python executable.")
    return python
def free(self, ptr):
    """A somewhat faithful implementation of libc `free`.

    Abstract: subclasses must override this.

    :param ptr: the location in memory to be freed
    :raises NotImplementedError: always, in this base class.
    """
    raise NotImplementedError("%s not implemented for %s" % (self.free.__func__.__name__,
                                                             self.__class__.__name__))
def todate(self):
    """Calculate the corresponding day in the Gregorian calendar.

    This is the main use case of this library.

    :return: Corresponding date in the Gregorian calendar.
    :rtype: :py:class:`datetime.date`
    """
    arr = get_gregorian_date_from_julian_day(self.tojulianday())
    return datetime.date(int(arr[0]), int(arr[1]), int(arr[2]))
def _get_all_forums(self):
    """Return all forums, memoized on the instance after the first query."""
    try:
        return self._all_forums
    except AttributeError:
        # first access: materialize the queryset once and cache it
        self._all_forums = list(Forum.objects.all())
        return self._all_forums
Returns all forums.
def filter_service_by_regex_name(regex):
    """Build a filter that keeps items whose service description matches `regex`.

    :param regex: regex to filter (matched against the start of the
        service description, per ``re.match``)
    :type regex: str
    :return: filter callable taking an ``items`` mapping, returning bool
    """
    pattern = re.compile(regex)

    def inner_filter(items):
        """Return True when items['service'] exists and its description matches."""
        service = items["service"]
        if service is None:
            return False
        return pattern.match(service.service_description) is not None

    return inner_filter
Filter for service Filter on regex :param regex: regex to filter :type regex: str :return: Filter :rtype: bool
def pick_free_port(hostname=REDIRECT_HOST, port=0):
    """Try to bind `port` on `hostname`. The default ``port=0`` asks the OS
    to select a free ephemeral port.

    :param hostname: interface to bind on
    :param port: requested port; 0 means "pick any free port"
    :return: the port number that was successfully bound (then released)
    :raises AuthenticationException: if even an ephemeral port (0) cannot
        be bound.
    """
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((hostname, port))
    except OSError as e:
        # Fix: close the socket on the failure path too — previously it
        # leaked here, once per failed bind (including each recursion).
        s.close()
        log.warning("Could not bind to %s:%s %s", hostname, port, e)
        if port == 0:
            print('Unable to find an open port for authentication.')
            raise AuthenticationException(e)
        # A specific port was requested and is unavailable: fall back to
        # letting the OS choose.
        return pick_free_port(hostname, 0)
    addr, port = s.getsockname()
    s.close()
    return port
Try to bind a port. Default=0 selects a free port.
def get_feature_variable_integer(self, feature_key, variable_key, user_id, attributes=None):
    """Returns value for a certain integer variable attached to a feature flag.

    Args:
      feature_key: Key of the feature whose variable's value is being accessed.
      variable_key: Key of the variable whose value is to be accessed.
      user_id: ID for user.
      attributes: Dict representing user attributes.

    Returns:
      Integer value of the variable, or None on invalid keys or a
      type mismatch (delegated to _get_feature_variable_for_type).
    """
    return self._get_feature_variable_for_type(
        feature_key,
        variable_key,
        entities.Variable.Type.INTEGER,
        user_id,
        attributes,
    )
Returns value for a certain integer variable attached to a feature flag. Args: feature_key: Key of the feature whose variable's value is being accessed. variable_key: Key of the variable whose value is to be accessed. user_id: ID for user. attributes: Dict representing user attributes. Returns: Integer value of the variable. None if: - Feature key is invalid. - Variable key is invalid. - Mismatch with type of variable.
def get_default_envlist(version):
    """Parse a default tox env from a Travis-style Python version string.

    ``'pypy'``/``'pypy3'`` are passed through; ``'X.Y'`` or ``'X.Y.Z'``
    becomes ``'pyXY'``; anything else falls back to guess_python_env().
    """
    if version in ('pypy', 'pypy3'):
        return version
    match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
    if match is None:
        return guess_python_env()
    major, minor = match.groups()
    return 'py%s%s' % (major, minor)
Parse a default tox env based on the version. The version comes from the ``TRAVIS_PYTHON_VERSION`` environment variable. If that isn't set or is invalid, then use sys.version_info to come up with a reasonable default.
def _parse_invite(client, command, actor, args):
    """Parse an INVITE and dispatch an event.

    ``args`` is "<target> <channel>"; the channel is lowercased before
    dispatching.
    """
    target, _sep, channel = args.rpartition(" ")
    client.dispatch_event("INVITE", actor, target, channel.lower())
Parse an INVITE and dispatch an event.
def get_empty_config():
    """Return an empty Config object with no options set."""
    return Config(
        examples_dir=None,
        custom_dir=None,
        color_config=get_empty_color_config(),
        use_color=None,
        pager_cmd=None,
        editor_cmd=None,
        squeeze=None,
        subs=None,
    )
Return an empty Config object with no options set.
def length(self):
    """Return the Euclidean length (magnitude) of this vector.

    Uses :func:`math.hypot`, which avoids intermediate overflow/underflow
    compared to ``sqrt(x*x + y*y)`` for extreme component values.
    """
    return math.hypot(self.X, self.Y)
Gets the length of this Vector
def put(self, key, value):
    """Store `value` under `key`, then record the entry in its parent directory.

    DirectoryTreeDatastore additionally maintains, for each parent key, a
    'directory' entry listing the string keys of its children.
    """
    super(DirectoryTreeDatastore, self).put(key, value)
    str_key = str(key)
    # the root key has no parent directory to update
    if str_key == '/':
        return
    dir_key = key.parent.instance('directory')
    # fetch the parent's current directory listing
    directory = self.directory(dir_key)
    # NOTE(review): the write-back is nested under the membership check here,
    # i.e. the directory is only re-stored when a new entry was appended —
    # confirm against the upstream datastore implementation.
    if str_key not in directory:
        directory.append(str_key)
        super(DirectoryTreeDatastore, self).put(dir_key, directory)
Stores the object `value` named by `key`. DirectoryTreeDatastore stores a directory entry.
def write(self, data):
    """Buffer `data` for sending over the IBB.

    Data is appended to the internal write buffer; if the buffer exceeds
    the high-water mark, the protocol is asked to pause writing
    (flow control).  Any buffered data sets the can-write event so the
    sender task picks it up.
    """
    # drop writes once the stream is shutting down
    if self.is_closing():
        return
    self._write_buffer += data
    # apply back-pressure when the buffer passes the high-water mark
    if len(self._write_buffer) >= self._output_buffer_limit_high:
        self._protocol.pause_writing()
    # wake the writer whenever there is something to send
    if self._write_buffer:
        self._can_write.set()
Send `data` over the IBB. If `data` is larger than the block size, it is chunked and sent in chunks. Chunks from one call of :meth:`write` will always be sent in series.
def _basic_return(self, frame_in):
    """Handle a Basic Return Frame and treat it as an error.

    :param specification.Basic.Return frame_in: Amqp frame.
    :return:
    """
    reply_text = try_utf8_decode(frame_in.reply_text)
    message = (
        "Message not delivered: %s (%s) to queue '%s' from exchange '%s'"
        % (
            reply_text,
            frame_in.reply_code,
            frame_in.routing_key,
            frame_in.exchange,
        )
    )
    error = AMQPMessageError(message, reply_code=frame_in.reply_code)
    self.exceptions.append(error)
Handle a Basic Return Frame and treat it as an error. :param specification.Basic.Return frame_in: Amqp frame. :return:
def raw(self):
    """Return the raw key, stripped of trailing CR/LF.

    Falls back to decoding the base64-encoded form when no raw key
    is stored.

    returns:
        str: raw key
    """
    key = self._raw if self._raw else base64decode(self._b64encoded)
    return text_type(key).strip("\r\n")
Return raw key. returns: str: raw key
def getMessage(self):
    """Return a colorized log message based on the log level.

    On Windows (or for levels below DEBUG) the plain message is returned,
    since Windows consoles don't handle the escape codes.

    :returns: ``str``
    """
    msg = str(self.msg)
    if self.args:
        msg = msg % self.args
    if platform.system().lower() == 'windows' or self.levelno < 10:
        return msg
    # map descending level thresholds to color names
    for threshold, color in ((50, 'critical'), (40, 'error'),
                             (30, 'warn'), (20, 'info')):
        if self.levelno >= threshold:
            return utils.return_colorized(msg, color)
    return utils.return_colorized(msg, 'debug')
Returns a colorized log message based on the log level. If the platform is windows the original message will be returned without colorization windows escape codes are crazy. :returns: ``str``
def find_breaking_changes(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[BreakingChange]:
    """Find breaking changes.

    Given two schemas, returns a list containing descriptions of all the
    kinds of breaking changes covered by the individual finder functions.
    """
    breaking: List[BreakingChange] = []
    breaking.extend(find_removed_types(old_schema, new_schema))
    breaking.extend(find_types_that_changed_kind(old_schema, new_schema))
    breaking.extend(
        find_fields_that_changed_type_on_object_or_interface_types(
            old_schema, new_schema
        )
    )
    breaking.extend(
        find_fields_that_changed_type_on_input_object_types(
            old_schema, new_schema
        ).breaking_changes
    )
    breaking.extend(find_types_removed_from_unions(old_schema, new_schema))
    breaking.extend(find_values_removed_from_enums(old_schema, new_schema))
    breaking.extend(find_arg_changes(old_schema, new_schema).breaking_changes)
    breaking.extend(
        find_interfaces_removed_from_object_types(old_schema, new_schema)
    )
    breaking.extend(find_removed_directives(old_schema, new_schema))
    breaking.extend(find_removed_directive_args(old_schema, new_schema))
    breaking.extend(find_added_non_null_directive_args(old_schema, new_schema))
    breaking.extend(find_removed_directive_locations(old_schema, new_schema))
    return breaking
Find breaking changes. Given two schemas, returns a list containing descriptions of all the types of breaking changes covered by the other functions down below.
def cleanup(self):
    """Remove the directory containing the clone and virtual environment."""
    log.info('Removing temp dir %s', self._tempdir.name)
    self._tempdir.cleanup()
Remove the directory containing the clone and virtual environment.
def mat_to_numpy_arr(self):
    """Convert the 'mat' list to a numpy array.

    (Numpy arrays can not be saved as JSON, so the data lives as a list
    and is converted on demand.)
    """
    import numpy as np
    mat = self.dat['mat']
    self.dat['mat'] = np.asarray(mat)
convert list to numpy array - numpy arrays can not be saved as json
def register_for_duty(self, context):
    """Report that a config agent is ready for duty via an RPC call."""
    return self.client.prepare().call(
        context, 'register_for_duty', host=self.host)
Report that a config agent is ready for duty.
def generate_name(self, name=None):
    """Generate a robot name for the instance, if the user doesn't supply one.

    Hyphens are normalized to underscores in the stored name.

    :param name: optional explicit name; when None, one is generated
        via ``self.RobotNamer.generate()``.
    """
    # fix: compare to None with ``is``, not ``==`` (PEP 8 identity check)
    if name is None:
        name = self.RobotNamer.generate()
    self.name = name.replace('-', '_')
generate a Robot Name for the instance to use, if the user doesn't supply one.
def argmax(attrs, inputs, proto_obj):
    """Returns indices of the maximum values along an axis.

    The result is routed through a cast to int64 to match ONNX output
    type expectations.
    """
    axis = attrs.get('axis', 0)
    keepdims = attrs.get('keepdims', 1)
    indices = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims)
    return 'cast', {'dtype': 'int64'}, indices
Returns indices of the maximum values along an axis
def start(self, driver=None):
    """Start the audio output driver in a separate background thread.

    Call any time after creating the Synth object; without it, use
    get_samples() to generate samples manually.

    Optional keyword argument:
      driver: which audio driver to use for output.  One of 'alsa',
        'oss', 'jack', 'portaudio', 'sndmgr', 'coreaudio',
        'Direct Sound', 'dsound', 'pulseaudio'.  Availability depends
        on how FluidSynth was compiled for your platform.
    """
    if driver is not None:
        valid_drivers = [
            'alsa', 'oss', 'jack', 'portaudio', 'sndmgr',
            'coreaudio', 'Direct Sound', 'dsound', 'pulseaudio',
        ]
        assert driver in valid_drivers
        fluid_settings_setstr(self.settings, 'audio.driver', driver)
    self.audio_driver = new_fluid_audio_driver(self.settings, self.synth)
Start audio output driver in separate background thread. Call this function any time after creating the Synth object. If you don't call this function, use get_samples() to generate samples. Optional keyword argument: driver: which audio driver to use for output Possible choices: 'alsa', 'oss', 'jack', 'portaudio' 'sndmgr', 'coreaudio', 'Direct Sound', 'dsound', 'pulseaudio' Not all drivers will be available for every platform, it depends on which drivers were compiled into FluidSynth for your platform.
def email_generator(names=None, domains=None, unique=False):
    """Creates a generator for generating email addresses.

    :arg names: list of names to use; defaults to ENGLISH_MONARCHS
        lowercased, ascii-fied, and stripped of whitespace
    :arg domains: list of domains to use; defaults to DOMAINS
    :arg unique: True if you want the username part of the email
        addresses to be unique
    :returns: generator
    """
    if names is None:
        names = [monarch.encode('ascii', 'ignore').lower().replace(b' ', b'')
                 for monarch in ENGLISH_MONARCHS]
    if domains is None:
        domains = DOMAINS
    while True:
        suffix = str(next(_unique_counter)) if unique else ''
        yield '{0}{1}@{2}'.format(
            random.choice(names), suffix, random.choice(domains))
Creates a generator for generating email addresses. :arg names: list of names to use; defaults to ENGLISH_MONARCHS lowercased, ascii-fied, and stripped of whitespace :arg domains: list of domains to use; defaults to DOMAINS :arg unique: True if you want the username part of the email addresses to be unique :returns: generator Example:: from eadred.helpers import email_generator gen = email_generator() for i in range(50): mymodel = SomeModel(email=gen.next()) mymodel.save() Example 2: >>> gen = email_generator() >>> gen.next() 'eadwig@example.net' >>> gen.next() 'henrybeauclerc@mail1.example.org' >>> gen.next() 'williamrufus@example.com'
def get_all(rc_file='~/.odoorpcrc'):
    """Return all session configurations from the `rc_file` file.

    Each section of the rc file becomes one entry mapping the session
    name to its connection settings ('type', 'host', 'protocol', 'port',
    'timeout', 'user', 'passwd', 'database').
    """
    conf = ConfigParser()
    conf.read([os.path.expanduser(rc_file)])

    def _session(name):
        # read one section, coercing port/timeout to their numeric types
        return {
            'type': conf.get(name, 'type'),
            'host': conf.get(name, 'host'),
            'protocol': conf.get(name, 'protocol'),
            'port': conf.getint(name, 'port'),
            'timeout': conf.getfloat(name, 'timeout'),
            'user': conf.get(name, 'user'),
            'passwd': conf.get(name, 'passwd'),
            'database': conf.get(name, 'database'),
        }

    return {name: _session(name) for name in conf.sections()}
Return all session configurations from the `rc_file` file. >>> import odoorpc >>> from pprint import pprint as pp >>> pp(odoorpc.session.get_all()) # doctest: +SKIP {'foo': {'database': 'db_name', 'host': 'localhost', 'passwd': 'password', 'port': 8069, 'protocol': 'jsonrpc', 'timeout': 120, 'type': 'ODOO', 'user': 'admin'}, ...} .. doctest:: :hide: >>> import odoorpc >>> session = '%s_session' % DB >>> odoo.save(session) >>> data = odoorpc.session.get_all() >>> data[session]['host'] == HOST True >>> data[session]['protocol'] == PROTOCOL True >>> data[session]['port'] == int(PORT) True >>> data[session]['database'] == DB True >>> data[session]['user'] == USER True >>> data[session]['passwd'] == PWD True >>> data[session]['type'] == 'ODOO' True
def list_all_python_programs(self):
    """Collect a file list of all .py programs under self.fldr and
    accumulate file/byte/line/LOC statistics, printing a summary."""
    self.tot_lines = 0
    self.tot_bytes = 0
    self.tot_files = 0
    self.tot_loc = 0
    self.lstPrograms = []
    file_list = mod_fl.FileList(
        [self.fldr], ['*.py'],
        ["__pycache__", "/venv/", "/venv2/", ".git"])
    for path in file_list.get_list():
        # package markers are not programs — skip them
        if '__init__.py' in path:
            continue
        self.add(path, 'TODO - add comment')
        text_file = mod_file.TextFile(path)
        self.tot_lines += text_file.count_lines_in_file()
        self.tot_loc += text_file.count_lines_of_code()
        self.tot_bytes += text_file.size
        self.tot_files += 1
    print('All Python Program Statistics')
    print('Files = ', self.tot_files, ' Bytes = ', self.tot_bytes,
          ' Lines = ', self.tot_lines, ' Lines of Code = ', self.tot_loc)
collects a filelist of all .py programs
def Search(self, key):
    """Search the disk list by partial ID or by mount point.

    A disk matches when `key` is a case-insensitive substring of its id,
    or when the lowercased key is one of its partition paths.
    """
    needle = key.lower()
    matches = []
    for disk in self.disks:
        if needle in disk.id.lower() or needle in disk.partition_paths:
            matches.append(disk)
    return matches
Search disk list by partial mount point or ID
def generate(env):
    """Add Builders and construction variables for midl to an Environment."""
    env['MIDL'] = 'MIDL.EXE'
    env['MIDLFLAGS'] = SCons.Util.CLVar('/nologo')
    env['MIDLCOM'] = ('$MIDL $MIDLFLAGS /tlb ${TARGETS[0]} /h ${TARGETS[1]} '
                      '/iid ${TARGETS[2]} /proxy ${TARGETS[3]} '
                      '/dlldata ${TARGETS[4]} $SOURCE 2> NUL')
    env['BUILDERS']['TypeLibrary'] = midl_builder
Add Builders and construction variables for midl to an Environment.
def Binary(x):
    """Return x as a binary type.

    Text is encoded (except on Jython/IronPython, where the plain
    bytes() conversion is used); everything else goes through bytes().
    """
    needs_encoding = isinstance(x, text_type) and not (JYTHON or IRONPYTHON)
    if needs_encoding:
        return x.encode()
    return bytes(x)
Return x as a binary type.
def changelog_cli(ctx):
    """Generate changelog from commit messages.

    When a subcommand was invoked, does nothing so the subcommand can
    run instead.
    """
    if ctx.invoked_subcommand:
        return
    # imported here rather than at module level — presumably to keep CLI
    # startup fast; confirm before moving to file scope
    from peltak.core import shell
    from . import logic
    shell.cprint(logic.changelog())
Generate changelog from commit messages.
def port(self, port):
    """Set the port that WebDriver will be running on.

    :param port: port number; must be an int in the range 1..65535
    :raises WebDriverException: if `port` is not an int or is out of range
    """
    # The original wrapped int(port) in a try/except after already
    # rejecting non-ints above, so the except branch was dead code —
    # removed; behavior for every input is unchanged.
    if not isinstance(port, int):
        raise WebDriverException("Port needs to be an integer")
    if port < 1 or port > 65535:
        raise WebDriverException("Port number must be in the range 1..65535")
    self._port = port
    self.set_preference("webdriver_firefox_port", self._port)
Sets the port that WebDriver will be running on
def CreateUnit(self, parent=None, value=None, bid_amount=None):
    """Creates a unit node.

    Args:
      parent: The node that should be this node's parent.
      value: The value being partitioned on.
      bid_amount: The amount to bid for matching products, in micros.

    Returns:
      A new unit node.
    """
    unit = {'xsi_type': 'ProductPartition', 'partitionType': 'UNIT'}
    if parent is not None:
        unit['parentCriterionId'] = parent['id']
    unit['caseValue'] = value

    # A positive bid makes this a biddable criterion; otherwise negative.
    if bid_amount is not None and bid_amount > 0:
        criterion = {
            'xsi_type': 'BiddableAdGroupCriterion',
            'biddingStrategyConfiguration': {
                'bids': [{
                    'xsi_type': 'CpcBid',
                    'bid': {
                        'xsi_type': 'Money',
                        'microAmount': str(bid_amount),
                    },
                }]
            },
        }
    else:
        criterion = {'xsi_type': 'NegativeAdGroupCriterion'}

    criterion['adGroupId'] = self.adgroup_id
    criterion['criterion'] = unit
    self.CreateAddOperation(criterion)
    return unit
Creates a unit node. Args: parent: The node that should be this node's parent. value: The value being partitioned on. bid_amount: The amount to bid for matching products, in micros. Returns: A new unit node.
def get_table_schema(self, dataset, table, project_id=None):
    """Return the table schema.

    Parameters
    ----------
    dataset : str
        The dataset containing the `table`.
    table : str
        The table to get the schema for
    project_id: str, optional
        The project of the dataset.

    Returns
    -------
    list
        A ``list`` of ``dict`` objects that represent the table schema.
        If the table doesn't exist, None is returned.
    """
    project_id = self._get_project_id(project_id)
    try:
        result = self.bigquery.tables().get(
            projectId=project_id,
            tableId=table,
            datasetId=dataset,
        ).execute(num_retries=self.num_retries)
    except HttpError as e:
        # only a 404 (table missing) is tolerated; anything else bubbles up
        if int(e.resp['status']) != 404:
            raise
        logger.warn('Table %s.%s does not exist', dataset, table)
        return None
    return result['schema']['fields']
Return the table schema. Parameters ---------- dataset : str The dataset containing the `table`. table : str The table to get the schema for project_id: str, optional The project of the dataset. Returns ------- list A ``list`` of ``dict`` objects that represent the table schema. If the table doesn't exist, None is returned.
def is_valid_number_for_region(numobj, region_code):
    """Tests whether a phone number is valid for a certain region.

    If the number's country calling code does not match the given
    region's, this returns False immediately; otherwise the region's
    specific number patterns are checked.  In most cases you want
    is_valid_number instead.

    Arguments:
    numobj -- The phone number object that we want to validate.
    region_code -- The region that we want to validate the phone number for.

    Returns a boolean that indicates whether the number is of a valid pattern.
    """
    country_code = numobj.country_code
    if region_code is None:
        return False
    metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code.upper())
    # Reject when there is no metadata, or when the number's country code
    # doesn't belong to the requested region (the non-geographical-entity
    # pseudo-region is exempt from the country-code match).
    if (metadata is None or
            (region_code != REGION_CODE_FOR_NON_GEO_ENTITY and
             country_code != country_code_for_valid_region(region_code))):
        return False
    nsn = national_significant_number(numobj)
    # valid iff the national number matches some known type for this region
    return (_number_type_helper(nsn, metadata) != PhoneNumberType.UNKNOWN)
Tests whether a phone number is valid for a certain region. Note this doesn't verify the number is actually in use, which is impossible to tell by just looking at a number itself. If the country calling code is not the same as the country calling code for the region, this immediately exits with false. After this, the specific number pattern rules for the region are examined. This is useful for determining for example whether a particular number is valid for Canada, rather than just a valid NANPA number. Warning: In most cases, you want to use is_valid_number instead. For example, this method will mark numbers from British Crown dependencies such as the Isle of Man as invalid for the region "GB" (United Kingdom), since it has its own region code, "IM", which may be undesirable. Arguments: numobj -- The phone number object that we want to validate. region_code -- The region that we want to validate the phone number for. Returns a boolean that indicates whether the number is of a valid pattern.
def set_cell(self, i, j, value):
    """Set cell (i, j) to `value` after a series of safety checks.

    :param i: The row number
    :type i: int
    :param j: The column number
    :type j: int
    :param value: The value to set
    :type value: int
    :raises: :py:class:`dlxsudoku.exceptions.SudokuHasNoSolutionError`
    """
    box_index = (i // self.order) * self.order + (j // self.order)
    # evaluate every check eagerly (mirrors the original all([...]) form)
    checks = [
        value in self._possibles[i][j],
        value in self._poss_rows[i],
        value in self._poss_cols[j],
        value in self._poss_box[box_index],
        value not in self.row(i),
        value not in self.col(j),
        value not in self.box(i, j),
    ]
    if not all(checks):
        raise SudokuHasNoSolutionError("This value cannot be set here!")
    self[i][j] = value
Set a cell's value, with a series of safety checks :param i: The row number :type i: int :param j: The column number :type j: int :param value: The value to set :type value: int :raises: :py:class:`dlxsudoku.exceptions.SudokuHasNoSolutionError`