code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def time_question_features(self, text):
    """Extract lexical features from *text* for time-question classification.

    Features produced: whether each word of *text* is a sentence-initial word
    in the training sentences, whether it occurs anywhere in them, and
    per-letter (a-z) counts and presence flags over the lowercased text.
    """
    corpus = self.positive + self.negative
    vocabulary = " ".join(corpus).split()
    first_words = [sentence.split(' ', 1)[0] for sentence in corpus]
    features = {}
    words = text.split()
    for word in words:
        features['first_word({})'.format(word)] = (word in first_words)
    for word in words:
        features['contains({})'.format(word)] = (word in vocabulary)
    lowered = text.lower()
    for letter in 'abcdefghijklmnopqrstuvwxyz':
        features['count({})'.format(letter)] = lowered.count(letter)
        features['has({})'.format(letter)] = (letter in lowered)
    return features
Provide an analysis of significant features in the string.
def get_class_method(cls_or_inst, method_name):
    """Return the method named *method_name* from a class or an instance.

    Accepts either a class or an instance (its class is used).  Properties
    resolve to their getter and cached properties to their wrapped function.
    Returns ``None`` when the attribute does not exist.
    """
    if isinstance(cls_or_inst, type):
        cls = cls_or_inst
    else:
        cls = cls_or_inst.__class__
    attr = getattr(cls, method_name, None)
    if isinstance(attr, property):
        return attr.fget
    if isinstance(attr, cached_property):
        return attr.func
    return attr
Returns a method from a given class or instance. When the method does not exist, it returns `None`. Also works with properties and cached properties.
def random_expr_with_required_var(depth, required_var, optional_list, ops):
    """Generate a random expression tree containing *required_var* exactly once.

    Args:
        depth: at least one leaf will be this many levels below the root.
        required_var: char placed exactly once at some leaf (the solve-for var).
        optional_list: chars randomly chosen for all other leaves (constants).
        ops: list of operator objects; one is chosen per internal node.

    Returns:
        The root ExprNode of the generated tree (or a leaf string at depth 0).
    """
    if not depth:
        # Leaf level: emit the required var if it was routed here,
        # otherwise pick a random constant leaf.
        if required_var:
            return required_var
        return str(optional_list[random.randrange(len(optional_list))])
    # Randomly choose which child continues the max-depth chain and which
    # child carries the required variable (independently).
    max_depth_side = random.randrange(2)
    other_side_depth = random.randrange(depth)
    required_var_side = random.randrange(2)
    left = random_expr_with_required_var(
        depth - 1 if max_depth_side else other_side_depth,
        required_var if required_var_side else None,
        optional_list, ops)
    right = random_expr_with_required_var(
        depth - 1 if not max_depth_side else other_side_depth,
        required_var if not required_var_side else None,
        optional_list, ops)
    op = ops[random.randrange(len(ops))]
    return ExprNode(left, right, op)
Generate a random expression tree with a required variable. The required variable appears exactly once in the expression. Args: depth: At least one leaf will be this many levels down from the top. required_var: A char. This char is guaranteed to be placed exactly once at a leaf somewhere in the tree. This is the var to solve for. optional_list: A list of chars. These chars are randomly selected as leaf values. These are constant vars. ops: A list of ExprOp instances. Returns: An ExprNode instance which is the root of the generated expression tree.
def make_request(endpoint, **kwargs):
    """Send a request to the NApps server.

    Keyword args: ``json`` (payload, default []), ``package`` (file object to
    upload, optional), ``method`` (HTTP verb, default 'GET').
    Exits the process on connection failure.
    """
    data = kwargs.get('json', [])
    package = kwargs.get('package', None)
    method = kwargs.get('method', 'GET')
    # Resolve e.g. requests.get / requests.post from the verb name.
    function = getattr(requests, method.lower())
    try:
        if package:
            # Package upload: send as multipart form data.
            response = function(endpoint, data=data, files={'file': package})
        else:
            response = function(endpoint, json=data)
    except requests.exceptions.ConnectionError:
        LOG.error("Couldn't connect to NApps server %s.", endpoint)
        sys.exit(1)
    return response
Send a request to server.
def IsPathSuffix(mod_path, path):
    """Check whether *path* is a full path suffix of *mod_path*.

    A full suffix means *path* matches the end of *mod_path* on a path
    component boundary (or the two are equal).
    """
    if not mod_path.endswith(path):
        return False
    if len(mod_path) == len(path):
        return True
    # The character just before the suffix must be a path separator.
    return mod_path[:-len(path)].endswith(os.sep)
Checks whether path is a full path suffix of mod_path. Args: mod_path: Must be an absolute path to a source file. Must not have file extension. path: A relative path. Must not have file extension. Returns: True if path is a full path suffix of mod_path. False otherwise.
def read_scenarios(filename):
    """Read a keyword dictionary per scenario from an INI-style file.

    :param filename: name of the file holding scenarios.
    :return: dict mapping each section name to its key/value pairs, plus the
        synthetic keys ``scenario_name`` and ``full_path``.
    """
    filename = os.path.abspath(filename)
    blocks = {}
    parser = ConfigParser()
    try:
        parser.read(filename)
    except MissingSectionHeaderError:
        # Headerless file: synthesize a section named after the file so the
        # INI parser can consume it.
        base_name = os.path.basename(filename)
        name = os.path.splitext(base_name)[0]
        section = '[%s]\n' % name
        # Close the file handle (it was previously leaked).
        with open(filename) as handle:
            content = section + handle.read()
        # read_file() replaces the readfp() alias that was removed in
        # Python 3.12.
        parser.read_file(StringIO(content))
    for section in parser.sections():
        items = parser.items(section)
        items.append(('scenario_name', section))
        items.append(('full_path', filename))
        blocks[section] = dict(items)
    return blocks
Read keywords dictionary from file. :param filename: Name of file holding scenarios. :return Dictionary with structure like this: { 'foo' : { 'a': 'b', 'c': 'd'}, 'bar' : { 'd': 'e', 'f': 'g'} } A scenarios file may look like this: [jakarta_flood] hazard: /path/to/hazard.tif exposure: /path/to/exposure.tif function: function_id aggregation: /path/to/aggregation_layer.tif extent: minx, miny, maxx, maxy Notes: paths for hazard, exposure, and aggregation are relative to the scenario file path
def len_gt(name, value):
    """Succeed only if the register entry *name* has len() greater than *value*.

    Returns a standard state-style dict; when the name is missing from the
    register the result is False with an explanatory comment.
    """
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {}}
    if name not in __reg__:
        ret['comment'] = 'Value {0} not in register'.format(name)
        return ret
    ret['result'] = len(__reg__[name]['val']) > value
    return ret
Only succeed if length of the given register location is greater than the given value. USAGE: .. code-block:: yaml foo: check.len_gt: - value: 42 run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: foo
def write_interactions(G, path, delimiter=' ', encoding='utf-8'):
    """Write a DyNetx graph *G* to the binary file object *path* in
    interaction-list format, one encoded line per interaction."""
    for line in generate_interactions(G, delimiter):
        path.write((line + '\n').encode(encoding))
Write a DyNetx graph in interaction list format. Parameters ---------- G : graph A DyNetx graph. path : basestring The desired output filename delimiter : character Column delimiter
def export_organizations(self, outfile):
    """Export organizations information to a file.

    Dumps organization-related data to the given *outfile* file object.

    :param outfile: destination file object
    :raises RuntimeError: when writing to the file fails
    """
    exporter = SortingHatOrganizationsExporter(self.db)
    dump = exporter.export()
    try:
        outfile.write(dump)
        outfile.write('\n')
    except IOError as e:
        # Surface I/O problems as a generic runtime failure for the caller.
        raise RuntimeError(str(e))
    return CMD_SUCCESS
Export organizations information to a file. The method exports information related to organizations, to the given 'outfile' output file. :param outfile: destination file object
def _reset_session(self):
    """Create a fresh requests session with our User-Agent header and mark
    the client as logged out."""
    headers = {"User-Agent": self._user_agent}
    self._session = requests.Session()
    self._session.headers.update(headers)
    # Any prior authentication is invalidated by the new session.
    self._is_logged_in = False
Set session information
def pprint(self, imports=None, prefix="\n ", unknown_value='<?>',
           qualify=False, separator=""):
    """Render like Parameterized.pprint, but emit instance-style constructors.

    The rendered ``X.ClassName(`` call is rewritten to
    ``X.ClassName.instance(`` so the output reconstructs this instance.
    """
    r = Parameterized.pprint(self, imports, prefix,
                             unknown_value=unknown_value,
                             qualify=qualify, separator=separator)
    classname = self.__class__.__name__
    return r.replace(".%s(" % classname, ".%s.instance(" % classname)
Same as Parameterized.pprint, except that X.classname(Y is replaced with X.classname.instance(Y
def run(self):
    """Invoke the wrapped callable repeatedly until the thread is stopped.

    Loops until the ``_finished`` event is set, waiting between calls for
    the callable's ``_interval`` attribute (milliseconds).
    """
    while not self._finished.isSet():
        self._func(self._reference)
        # _interval is in milliseconds; Event.wait expects seconds.
        self._finished.wait(self._func._interval / 1000.0)
Keep running this thread until it's stopped
def export(self, exporter=None, force_stroke=False):
    """Export this SWF using the given exporter.

    When no exporter is passed the default swf.export.SVGExporter is used.
    Exporters should extend swf.export.BaseExporter.

    @param exporter: the exporter to use
    @param force_stroke: force strokes on fills (useful for edge cases)
    """
    exporter = SVGExporter() if exporter is None else exporter
    if self._data is None:
        raise Exception("This SWF was not loaded! (no data)")
    if len(self.tags) == 0:
        raise Exception("This SWF doesn't contain any tags!")
    return exporter.export(self, force_stroke)
Export this SWF using the specified exporter. When no exporter is passed in the default exporter used is swf.export.SVGExporter. Exporters should extend the swf.export.BaseExporter class. @param exporter : the exporter to use @param force_stroke : set to true to force strokes on fills, useful for some edge cases.
def inrypl(vertex, direct, plane):
    """Find the intersection of a ray and a plane.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inrypl_c.html

    :param vertex: Vertex vector of ray (3-element array of floats).
    :param direct: Direction vector of ray (3-element array of floats).
    :param plane: A SPICE plane.
    :return: (number of intersection points, intersection point if nxpts == 1)
    """
    assert (isinstance(plane, stypes.Plane))
    vertex = stypes.toDoubleVector(vertex)
    direct = stypes.toDoubleVector(direct)
    # Output parameters for the CSPICE call.
    nxpts = ctypes.c_int()
    xpt = stypes.emptyDoubleVector(3)
    libspice.inrypl_c(vertex, direct, ctypes.byref(plane),
                      ctypes.byref(nxpts), xpt)
    return nxpts.value, stypes.cVectorToPython(xpt)
Find the intersection of a ray and a plane. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inrypl_c.html :param vertex: Vertex vector of ray. :type vertex: 3-Element Array of floats :param direct: Direction vector of ray. :type direct: 3-Element Array of floats :param plane: A SPICE plane. :type plane: spiceypy.utils.support_types.Plane :return: Number of intersection points of ray and plane, Intersection point, if nxpts == 1. :rtype: tuple
def outlays(self):
    """Return a DataFrame of outlays, one column per child security."""
    columns = {}
    for security in self.securities:
        columns[security.name] = security.outlays
    return pd.DataFrame(columns)
Returns a DataFrame of outlays for each child SecurityBase
def node_cmd(cmd_name, node_dict):
    """Process commands that target specific nodes.

    Prompts for a node number, validates it, and dispatches to the handler
    for *cmd_name*.  Returns the handler's refresh flag (or None).
    """
    # Dispatch table from command name to its handler.
    sc = {"run": cmd_startstop, "stop": cmd_startstop,
          "connect": cmd_connect, "details": cmd_details}
    node_num = node_selection(cmd_name, len(node_dict))
    refresh_main = None
    if node_num != 0:
        (node_valid, node_info) = node_validate(node_dict, node_num, cmd_name)
        if node_valid:
            sub_cmd = sc[cmd_name]
            refresh_main = sub_cmd(node_dict[node_num], cmd_name, node_info)
        else:
            # Invalid target: show the error message briefly.
            ui_print_suffix(node_info, C_ERR)
            sleep(1.5)
    else:
        # 0 means the user aborted the selection.
        ui_print(" - Exit Command")
        sleep(0.5)
    return refresh_main
Process commands that target specific nodes.
def filter_from_alias(self, alias, backend=None):
    """Return aliases whose key contains *alias*, optionally restricted to
    *backend*, as an OrderedDict sorted case-insensitively by key.

    :param alias: substring to match against keys (None matches all)
    :param backend: when given, only items with this backend are kept
    """
    def alias_filter(key_item):
        key, item = key_item
        return ((alias is None or alias in key) and
                (backend is None or item.backend == backend))
    # Python 3: the built-in filter() and dict.items() replace the former
    # six.moves.filter / six.iteritems compatibility shims.
    items = filter(alias_filter, self.items())
    aliases = collections.OrderedDict(
        sorted(items, key=lambda a: a[0].lower()))
    return aliases
Return aliases that start with the given `alias`, optionally filtered by backend.
def notify_change(self, screen_id, x_origin, y_origin, width, height):
    """Request a guest screen size change.

    :param screen_id: logical guest screen number
    :param x_origin: x location of the screen in the guest
    :param y_origin: y location of the screen in the guest
    :param width: width of the guest display, in pixels
    :param height: height of the guest display, in pixels
    :raises TypeError: when any argument is not a baseinteger
    """
    # Validate every argument in one loop instead of five copy-pasted
    # checks; the error message format is unchanged.
    params = [('screen_id', screen_id), ('x_origin', x_origin),
              ('y_origin', y_origin), ('width', width), ('height', height)]
    for arg_name, arg_value in params:
        if not isinstance(arg_value, baseinteger):
            raise TypeError(
                "{0} can only be an instance of type baseinteger".format(arg_name))
    self._call("notifyChange",
               in_p=[screen_id, x_origin, y_origin, width, height])
Requests a size change. in screen_id of type int Logical guest screen number. in x_origin of type int Location of the screen in the guest. in y_origin of type int Location of the screen in the guest. in width of type int Width of the guest display, in pixels. in height of type int Height of the guest display, in pixels.
def _set(self, jsonData) : self["username"] = jsonData["user"] self["active"] = jsonData["active"] self["extra"] = jsonData["extra"] try: self["changePassword"] = jsonData["changePassword"] except Exception as e: pass try : self["password"] = jsonData["passwd"] except KeyError : self["password"] = "" self.URL = "%s/user/%s" % (self.connection.URL, self["username"])
Initialize all fields at once. If no password is specified, it will be set as an empty string
def _check_cmd(call): if call['retcode'] != 0: comment = '' std_err = call.get('stderr') std_out = call.get('stdout') if std_err: comment += std_err if std_out: comment += std_out raise CommandExecutionError('Error running command: {0}'.format(comment)) return call
Check the output of the cmd.run_all function call.
def apply_network(network, x, chunksize=None):
    """Apply a pytorch *network* to a numpy array *x*, optionally in chunks.

    When *chunksize* is given the input is evaluated in slices of that many
    rows and the results concatenated, bounding peak memory use.
    """
    # Run the input on the same device as the network's parameters.
    network_is_cuda = next(network.parameters()).is_cuda
    x = torch.from_numpy(x)
    with torch.no_grad():
        if network_is_cuda:
            x = x.cuda()
        if chunksize is None:
            return from_var(network(x))
        return np.concatenate(
            [from_var(network(x[i: i + chunksize]))
             for i in range(0, len(x), chunksize)])
Apply a pytorch network, potentially in chunks
def closest(self, obj, group, defaults=True):
    """Return the most appropriate Options object for *obj*.

    Designed to be called from the root of the tree.  The lookup target is
    built from the object's type name, sanitized group and sanitized label
    (empty components dropped), then resolved through find()/options() so
    inheritance and custom options apply.
    """
    components = (obj.__class__.__name__,
                  group_sanitizer(obj.group),
                  label_sanitizer(obj.label))
    target = '.'.join([c for c in components if c])
    return self.find(components).options(group, target=target, defaults=defaults)
This method is designed to be called from the root of the tree. Given any LabelledData object, this method will return the most appropriate Options object, including inheritance. In addition, closest supports custom options by checking the object
def with_defaults(self, obj):
    """Return a copy of the settings dict *obj* augmented with this
    object's defaults for any missing keys.  Keys are validated first."""
    self.check_valid_keys(obj)
    merged = dict(obj)
    for key, value in self.defaults.items():
        merged.setdefault(key, value)
    return merged
Given a dict of hyperparameter settings, return a dict containing those settings augmented by the defaults for any keys missing from the dict.
def transform_data(self, data, request=None, response=None, context=None):
    """Run this endpoint's transform over *data* and return the result.

    The transform is skipped when *data* is already an instance of a
    transform class; the request context is injected when supported.
    """
    transform = self.transform
    if hasattr(transform, 'context'):
        self.transform.context = context
    # Skip when the transform is a class and data is already an instance
    # of it (nothing to do).
    if transform and not (isinstance(transform, type) and isinstance(data, transform)):
        if self._params_for_transform:
            # The transform declared extra parameters derived from the
            # request/response pair.
            return transform(data, **self._arguments(self._params_for_transform, request, response))
        else:
            return transform(data)
    return data
Runs the transforms specified on this endpoint with the provided data, returning the data modified
def classify_harmonic(self, partial_labels, use_CMN=True):
    """Harmonic function method for semi-supervised classification
    (a.k.a. Gaussian Mean Fields; Zhu, Ghahramani & Lafferty, 2003).

    partial_labels : (n,) integer labels, -1 for unlabeled points.
    use_CMN : when True, apply Class Mass Normalization to the soft labels.
    Returns a copy of the labels with unlabeled entries filled in.
    """
    labels = np.array(partial_labels, copy=True)
    unlabeled = labels == -1
    # One-hot encode the known labels.
    fl, classes = _onehot(labels[~unlabeled])
    L = self.laplacian(normed=False)
    # Restrict the Laplacian to rows of unlabeled points.
    if ss.issparse(L):
        L = L.tocsr()[unlabeled].toarray()
    else:
        L = L[unlabeled]
    Lul = L[:,~unlabeled]
    Luu = L[:,unlabeled]
    # Harmonic solution: fu = -Luu^{-1} Lul fl
    fu = -np.linalg.solve(Luu, Lul.dot(fl))
    if use_CMN:
        # Class Mass Normalization: rescale by labeled class mass.
        scale = (1 + fl.sum(axis=0)) / fu.sum(axis=0)
        fu *= scale
    labels[unlabeled] = classes[fu.argmax(axis=1)]
    return labels
Harmonic function method for semi-supervised classification, also known as the Gaussian Mean Fields algorithm. partial_labels: (n,) array of integer labels, -1 for unlabeled. use_CMN : when True, apply Class Mass Normalization From "Semi-Supervised Learning Using Gaussian Fields and Harmonic Functions" by Zhu, Ghahramani, and Lafferty in 2003. Based on the matlab code at: http://pages.cs.wisc.edu/~jerryzhu/pub/harmonic_function.m
def qstring(option):
    """Quote *option* for jinja unless it is a node attribute or a Chef
    constant, which must remain unquoted."""
    is_node_attr = re.match(NODE_ATTR_RE, option) is not None
    is_chef_const = re.match(CHEF_CONST_RE, option) is not None
    if is_node_attr or is_chef_const:
        return option
    return "'%s'" % option
Custom quoting method for jinja.
def single_case(self, i, case):
    """Uppercase or lowercase just the next character of the stream *i*.

    Handles nested format braces and backslash references before falling
    back to converting a single plain character.
    """
    # Replace the top of the single-char case stack with the requested case.
    if self.single_stack:
        self.single_stack.pop()
    self.single_stack.append(case)
    try:
        t = next(i)
        if self.use_format and t in _CURLY_BRACKETS:
            self.handle_format(t, i)
        elif t == '\\':
            # A backslash starts a reference, which may consume more input.
            try:
                t = next(i)
                self.reference(t, i)
            except StopIteration:
                # Trailing lone backslash: keep it and propagate exhaustion.
                self.result.append(t)
                raise
        elif self.single_stack:
            self.result.append(self.convert_case(t, self.get_single_stack()))
    except StopIteration:
        # End of input: nothing left to convert.
        pass
Uppercase or lowercase the next character.
def early_arbiter_linking(self, arbiter_name, params):
    """Prepare the arbiter for early operations.

    When the configuration defines no arbiter, a default self arbiter is
    created, reachable on 127.0.0.1:7770.  Arbiters and modules are then
    defaulted and linked together.

    :param arbiter_name: default arbiter name if none exists in the config
    :type arbiter_name: str
    :return: None
    """
    if not self.arbiters:
        params.update({
            'name': arbiter_name,
            'arbiter_name': arbiter_name,
            'host_name': socket.gethostname(),
            'address': '127.0.0.1',
            'port': 7770,
            'spare': '0'
        })
        logger.warning("There is no arbiter, I add myself (%s) reachable on %s:%d",
                       arbiter_name, params['address'], params['port'])
        arb = ArbiterLink(params, parsing=True)
        self.arbiters = ArbiterLinks([arb])
    # Fill in defaults and cross-link arbiters with their modules.
    self.arbiters.fill_default()
    self.modules.fill_default()
    self.arbiters.linkify(modules=self.modules)
    self.modules.linkify()
Prepare the arbiter for early operations :param arbiter_name: default arbiter name if no arbiter exist in the configuration :type arbiter_name: str :return: None
def get_lock(self, path):
    """Return a job lock for *path*.

    Uses the lockfile backend when configured; otherwise hands out one
    shared in-process threading.Lock per path, created on first use.
    """
    if self.lockfile:
        return self.lockfile.LockFile(path)
    with self.job_locks_lock:
        try:
            return self.job_locks[path]
        except KeyError:
            lock = threading.Lock()
            self.job_locks[path] = lock
            return lock
Get a job lock corresponding to the path - assumes parent directory exists but the file itself does not.
def compose(*funcs):
    """Chained function composition.

    ``compose(f, g, h)(x) == f(g(h(x)))``.  With no arguments an identity
    function is returned (None when called without arguments itself).
    """
    if not funcs:
        return lambda *args: args[0] if args else None
    if len(funcs) == 1:
        return funcs[0]

    def composed(*args):
        # The last function receives the call arguments; the rest are
        # applied right-to-left to the accumulated value.
        value = funcs[-1](*args)
        for func in reversed(funcs[:-1]):
            value = func(value)
        return value
    return composed
chained function composition wrapper creates function f, where f(x) = arg0(arg1(arg2(...argN(x)))) if *funcs is empty, an identity function is returned. Args: *funcs: list of functions to chain Returns: a new function composed of chained calls to *args
def _save_multi(data, file_name, sep=";"):
    """Store *data* column-wise in a CSV file.

    Each element of *data* becomes one column; ragged columns are padded
    via zip_longest.  Raises ExportFailed when writing fails.
    """
    logger.debug("saving multi")
    with open(file_name, "w", newline='') as f:
        logger.debug(f"{file_name} opened")
        writer = csv.writer(f, delimiter=sep)
        try:
            # Transpose the per-column data into rows.
            writer.writerows(itertools.zip_longest(*data))
        except Exception as e:
            logger.info(f"Exception encountered in batch._save_multi: {e}")
            raise ExportFailed
        logger.debug("wrote rows using itertools in _save_multi")
convenience function for storing data column-wise in a csv-file.
def process(self):
    """Call the asset-processing endpoint for every locale of this asset.

    API reference:
    https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets/asset-processing
    """
    for locale in self._fields.keys():
        self._client._put(
            "{0}/files/{1}/process".format(
                self.__class__.base_url(
                    self.space.id,
                    self.id,
                    environment_id=self._environment_id
                ),
                locale
            ),
            {},
            headers=self._update_headers()
        )
    # Re-fetch so the processed file data is visible on this instance.
    return self.reload()
Calls the process endpoint for all locales of the asset. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets/asset-processing
def create(*events):
    """Create a composite event mirroring the OR of all *events*.

    The returned threading.Event is set whenever any member event is set
    and cleared otherwise; member events are instrumented via orify().

    :param events: threading.Event instances to watch
    :return: the composite event
    :rtype: threading.Event
    """
    or_event = threading.Event()

    def changed():
        # Re-evaluate the OR of every member into the composite event.
        if any(event.is_set() for event in events):
            or_event.set()
        else:
            or_event.clear()

    for event in events:
        orify(event, changed)
    changed()
    return or_event
Creates a new multi_event The multi_event listens to all events passed in the "events" parameter. :param events: a list of threading.Events :return: The multi_event :rtype: threading.Event
def get_error_redirect(self, provider, reason):
    """Return the URL to redirect to on login failure (the model's admin
    changelist page).

    NOTE(review): *provider* and *reason* are accepted but unused here —
    presumably required by a shared redirect-hook interface; confirm
    against the base class before removing them.
    """
    info = self.model._meta.app_label, self.model._meta.model_name
    return reverse('admin:%s_%s_changelist' % info)
Return url to redirect on login failure.
def showstructure(self, dataman=True, column=True, subtable=False, sort=False):
    """Return the table structure as a formatted string.

    `dataman`  include data manager info (otherwise only column info).
    `column`   include column descriptions per data manager.
    `subtable` recursively include the structure of subtables.
    `sort`     sort columns alphabetically.
    """
    result = self._showstructure(dataman, column, subtable, sort)
    return result
Show table structure in a formatted string. The structure of this table and optionally its subtables is shown. It shows the data manager info and column descriptions. Optionally the columns are sorted in alphabetical order. `dataman` Show data manager info? If False, only column info is shown. If True, data manager info and columns per data manager are shown. `column` Show column description per data manager? Only takes effect if dataman=True. `subtable` Show the structure of all subtables (recursively). The names of subtables are always shown. 'sort' Sort the columns in alphabetical order?
def get_state_for_transition(self, transition):
    """Calculate the target state of a transition.

    :param transition: the transition whose target state is determined
    :return: the to-state of the transition
    :raises TypeError: if *transition* is not a Transition
    """
    if not isinstance(transition, Transition):
        raise TypeError("transition must be of type Transition")
    # No explicit target (or a self-target) keeps control in this state.
    if transition.to_state == self.state_id or transition.to_state is None:
        return self
    else:
        return self.states[transition.to_state]
Calculate the target state of a transition :param transition: The transition of which the target state is determined :return: the to-state of the transition :raises exceptions.TypeError: if the transition parameter is of wrong type
def delete(self, using=None):
    """Delete the post instance.

    When this is the topic's only post, the whole topic is removed;
    otherwise the post is deleted and the topic trackers refreshed.
    """
    if self.is_alone:
        # The topic would become empty: delete it entirely instead.
        self.topic.delete()
    else:
        super(AbstractPost, self).delete(using)
        self.topic.update_trackers()
Deletes the post instance.
def extract(text, default=UNKNOWN):
    """Extract the calling convention from *text*.

    Returns *default* when *text* is empty or no convention is found.
    """
    if not text:
        return default
    match = CALLING_CONVENTION_TYPES.pattern.match(text)
    return match.group('cc') if match else default
Extracts the calling convention from the text. If the calling convention could not be found, the "default" is used.
def _day_rule_matches(self, rule, dt):
    """Match day-of-month US federal holidays including observance days.

    Holidays falling on Saturday are observed on Friday and those falling
    on Sunday on Monday, so both the actual holiday and its observance day
    match.
    """
    if dt.weekday() == 4:
        # Friday: also match if the holiday itself is on Saturday.
        sat = dt + datetime.timedelta(days=1)
        if super(USFederalHolidays, self)._day_rule_matches(rule, sat):
            return True
    elif dt.weekday() == 0:
        # Monday: also match if the holiday itself is on Sunday.
        sun = dt - datetime.timedelta(days=1)
        if super(USFederalHolidays, self)._day_rule_matches(rule, sun):
            return True
    return super(USFederalHolidays, self)._day_rule_matches(rule, dt)
Day-of-month-specific US federal holidays that fall on Sat or Sun are observed on Fri or Mon respectively. Note that this method considers both the actual holiday and the day of observance to be holidays.
def init_reddit(generator):
    """Attach a shared praw.Reddit factory to *generator*.

    One client is created per run so the underlying HTTP session is kept
    alive across article scanning.  Without the REDDIT_POSTER_AUTH setting
    the factory returns None and the plugin is effectively disabled.
    """
    auth_dict = generator.settings.get('REDDIT_POSTER_AUTH')
    if auth_dict is None:
        log.info("Could not find REDDIT_POSTER_AUTH key in settings, reddit plugin won't function")
        generator.get_reddit = lambda: None
        return
    reddit = praw.Reddit(**auth_dict)
    # Closure keeps a single client (and session) for the whole run.
    generator.get_reddit = lambda: reddit
this is a hack to make sure the reddit object keeps track of a session trough article scanning, speeding up networking as the connection can be kept alive.
def group_callback(self, iocb):
    """Callback invoked when a child iocb completes.

    Marks the whole group COMPLETED once every member's completion event
    is set; otherwise keeps waiting for the remaining children.
    """
    if _debug: IOGroup._debug("group_callback %r", iocb)

    # NOTE: the loop previously reused the name `iocb`, shadowing the
    # parameter; a distinct name is used for the member being inspected.
    for member in self.ioMembers:
        if not member.ioComplete.isSet():
            if _debug: IOGroup._debug("    - waiting for child: %r", member)
            break
    else:
        if _debug: IOGroup._debug("    - all children complete")
        self.ioState = COMPLETED
        self.trigger()
Callback when a child iocb completes.
def get(self, sid):
    """Construct a CallContext for the call identified by *sid*.

    :param sid: The unique string that identifies this resource
    :rtype: twilio.rest.api.v2010.account.call.CallContext
    """
    return CallContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
Constructs a CallContext :param sid: The unique string that identifies this resource :returns: twilio.rest.api.v2010.account.call.CallContext :rtype: twilio.rest.api.v2010.account.call.CallContext
def call(self, **data):
    """Issue the call.

    :param data: data to pass in the *body* of the request.
    """
    uri, body, headers = self.prepare(data)
    response = self.dispatch(uri, body, headers)
    return response
Issue the call. :param data: Data to pass in the *body* of the request.
def set_clear_color(self, color='black', alpha=None):
    """Set the screen clear color (wrapper for gl.glClearColor).

    Parameters
    ----------
    color : str | tuple | instance of Color
        Color to use.  See vispy.color.Color for options.
    alpha : float | None
        Alpha to use.
    """
    self.glir.command('FUNC', 'glClearColor', *Color(color, alpha).rgba)
Set the screen clear color This is a wrapper for gl.glClearColor. Parameters ---------- color : str | tuple | instance of Color Color to use. See vispy.color.Color for options. alpha : float | None Alpha to use.
def reference(self, taskfileinfo):
    """Reference the entity into the scene (only when status is None).

    Creates a new refobject, performs the actual reference via the refobj
    interface, then refreshes children and restrictions and notifies
    listeners.

    :param taskfileinfo: the taskfile to reference
    :returns: None
    :raises AssertionError: if the entity is already referenced/imported
    """
    assert self.status() is None,\
        "Can only reference, if the entity is not already referenced/imported. Use replace instead."
    refobj = self.create_refobject()
    # Parent any nodes created during referencing under the new refobj.
    with self.set_parent_on_new(refobj):
        self.get_refobjinter().reference(taskfileinfo, refobj)
    self.set_refobj(refobj)
    self.fetch_new_children()
    self.update_restrictions()
    self.emit_data_changed()
Reference the entity into the scene. Only possible if the current status is None. This will create a new refobject, then call :meth:`RefobjInterface.reference` and afterwards set the refobj on the :class:`Reftrack` instance. :param taskfileinfo: the taskfileinfo to reference :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo` :returns: None :rtype: None :raises: :class:`ReftrackIntegrityError`
def socket_read(fp):
    """Buffered read from a socket file object.

    Reads ``buffSize``-sized chunks until a read returns no data, then
    returns everything read as one string.

    @param fp: file pointer for the socket.
    @return: string of characters read from the buffer.
    """
    chunks = []
    while True:
        chunk = fp.read(buffSize)
        if not chunk:
            break
        chunks.append(chunk)
    # join() avoids the quadratic cost of repeated string concatenation.
    return ''.join(chunks)
Buffered read from socket. Reads all data available from socket. @fp: File pointer for socket. @return: String of characters read from buffer.
def get_attrs(cls):
    """Return (name, value) pairs of class attributes ordered by their
    definition index (``idx`` attribute, -1 when absent) then by name.

    Standard object members and callables/properties are excluded.
    """
    ignore = set(dir(type('dummy', (object,), {}))) | {'__metaclass__'}
    skip_types = (types.FunctionType, types.MethodType,
                  classmethod, staticmethod, property)
    attrs = [(name, value)
             for name, value in inspect.getmembers(cls)
             if name not in ignore and not isinstance(value, skip_types)]
    attrs.sort(key=lambda attr: (getattr(attr[1], 'idx', -1), attr[0]))
    return attrs
Get all class attributes ordered by definition
def unpack_glist(glist_ptr, cffi_type, transfer_full=True):
    """Yield each GList item cast to *cffi_type*.

    With *transfer_full*, every consumed item and finally the list itself
    are freed — so once iteration advances, previously yielded pointers
    are invalid.
    """
    current = glist_ptr
    while current:
        yield ffi.cast(cffi_type, current.data)
        if transfer_full:
            # Ownership was transferred: release the item we just yielded.
            free(current.data)
        current = current.next
    if transfer_full:
        lib.g_list_free(glist_ptr)
Takes a glist ptr, copies the values casted to type_ in to a list and frees all items and the list. If an item is returned all yielded before are invalid.
def rotate(self, log):
    """Archive the current *log* to a rotated file, then start a fresh,
    empty log."""
    for payload, kwargs in ((log, {'rotate': True}), ({}, {})):
        self.write(payload, **kwargs)
Move the current log to a new file with timestamp and create a new empty log file.
def dockDetailPane(self, detailPane, title=None, area=None):
    """Create a dock widget for *detailPane* and add it to the window.

    Defaults: the pane's class label as title and the left dock area.
    Successive detail docks are tabified together.  Returns the dock widget.
    """
    title = detailPane.classLabel() if title is None else title
    area = Qt.LeftDockWidgetArea if area is None else area
    dockWidget = self.dockWidget(detailPane, title, area)
    # Let the pane react when its dock is shown or hidden.
    dockWidget.visibilityChanged.connect(detailPane.dockVisibilityChanged)
    if len(self._detailDockWidgets) > 0:
        # Stack with the previously added detail dock as tabs.
        self.tabifyDockWidget(self._detailDockWidgets[-1], dockWidget)
    self._detailDockWidgets.append(dockWidget)
    return dockWidget
Creates a dockWidget and add the detailPane with a default title. By default the detail widget is added to the Qt.LeftDockWidgetArea.
def _date_based_where(self, type, query, where): value = str(where['value']).zfill(2) value = self.parameter(value) return 'strftime(\'%s\', %s) %s %s'\ % (type, self.wrap(where['column']), where['operator'], value)
Compiled a date where based clause :param type: The date type :type type: str :param query: A QueryBuilder instance :type query: QueryBuilder :param where: The condition :type where: dict :return: The compiled clause :rtype: str
def NHot(n, *xs, simplify=True):
    """Return an expression meaning "exactly *n* input functions are true".

    If *simplify* is True, return a simplified expression.

    :raises TypeError: when *n* is not an int
    :raises ValueError: when *n* is outside [0, len(xs)]
    """
    if not isinstance(n, int):
        raise TypeError("expected n to be an int")
    if not 0 <= n <= len(xs):
        fstr = "expected 0 <= n <= {}, got {}"
        raise ValueError(fstr.format(len(xs), n))
    xs = [Expression.box(x).node for x in xs]
    num = len(xs)
    terms = list()
    # One AND term per choice of which n inputs are hot; the rest negated.
    for hot_idxs in itertools.combinations(range(num), n):
        hot_idxs = set(hot_idxs)
        _xs = [xs[i] if i in hot_idxs else exprnode.not_(xs[i])
               for i in range(num)]
        terms.append(exprnode.and_(*_xs))
    # The overall expression is the OR of all the terms.
    y = exprnode.or_(*terms)
    if simplify:
        y = y.simplify()
    return _expr(y)
Return an expression that means "exactly N input functions are true". If *simplify* is ``True``, return a simplified expression.
def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):
    """Sign a ThresholdSha256 input.

    Hashes *message* (plus the fulfilled transaction id/output, if any)
    and signs the matching subcondition for each owner with its private key.

    Args:
        input_: the Input to be signed.
        message (str): the message to be signed.
        key_pairs (dict): public-key -> private-key mapping to sign with.

    Raises:
        KeypairMismatchException: when an owner's public key has no
            subcondition in the fulfillment or no matching private key.
    """
    input_ = deepcopy(input_)
    message = sha3_256(message.encode())
    if input_.fulfills:
        # Bind the signature to the specific output being spent.
        message.update('{}{}'.format(
            input_.fulfills.txid, input_.fulfills.output).encode())
    for owner_before in set(input_.owners_before):
        ccffill = input_.fulfillment
        subffills = ccffill.get_subcondition_from_vk(
            base58.b58decode(owner_before))
        if not subffills:
            raise KeypairMismatchException('Public key {} cannot be found '
                                           'in the fulfillment'
                                           .format(owner_before))
        try:
            private_key = key_pairs[owner_before]
        except KeyError:
            raise KeypairMismatchException('Public key {} is not a pair '
                                           'to any of the private keys'
                                           .format(owner_before))
        for subffill in subffills:
            subffill.sign(
                message.digest(), base58.b58decode(private_key.encode()))
    return input_
Signs a ThresholdSha256. Args: input_ (:class:`~bigchaindb.common.transaction. Input`) The Input to be signed. message (str): The message to be signed key_pairs (dict): The keys to sign the Transaction with.
async def scan(self, timeout=1):
    """Return a list of local IP addresses on interfaces with LIFX bulbs.

    Runs a LIFX discovery on every local IPv4 interface and collects the
    addresses where bulbs answered within *timeout* seconds.
    """
    adapters = await self.loop.run_in_executor(None, ifaddr.get_adapters)
    # BUG FIX: iterate the adapters fetched above instead of calling
    # ifaddr.get_adapters() a second time synchronously on the event loop.
    ips = [ip.ip
           for adapter in adapters
           for ip in adapter.ips if ip.is_IPv4]
    if not ips:
        return []
    tasks = []
    discoveries = []
    for ip in ips:
        manager = ScanManager(ip)
        lifx_discovery = LifxDiscovery(self.loop, manager)
        discoveries.append(lifx_discovery)
        lifx_discovery.start(listen_ip=ip)
        tasks.append(self.loop.create_task(manager.lifx_ip()))
    (done, pending) = await aio.wait(tasks, timeout=timeout)
    for discovery in discoveries:
        discovery.cleanup()
    for task in pending:
        task.cancel()
    return [task.result() for task in done]
Return a list of local IP addresses on interfaces with LIFX bulbs.
def compile(self, root, relpaths):
    """Bytecode-compile source files with this compiler's interpreter.

    :param string root: root path the source files are found under.
    :param list relpaths: paths of the source files relative to *root*.
    :returns: relative paths of the compiled bytecode files.
    :raises: Compiler.CompilationFailure on a non-zero compiler exit.
    """
    with named_temporary_file() as fp:
        fp.write(to_bytes(_COMPILER_MAIN % {'root': root, 'relpaths': relpaths},
                          encoding='utf-8'))
        fp.flush()
        try:
            # -s: skip the user site dir; -E: ignore PYTHON* env vars.
            out, _ = Executor.execute([self._interpreter.binary, '-sE', fp.name])
        except Executor.NonZeroExit as e:
            raise self.CompilationFailure(
                'encountered %r during bytecode compilation.\nstderr was:\n%s\n' % (e, e.stderr)
            )
    return out.splitlines()
Compiles the given python source files using this compiler's interpreter. :param string root: The root path all the source files are found under. :param list relpaths: The relative paths from the `root` of the source files to compile. :returns: A list of relative paths of the compiled bytecode files. :raises: A :class:`Compiler.Error` if there was a problem bytecode compiling any of the files.
def material_formula(self):
    """Return the chemical formula of the material from the feff.inp
    header, or a placeholder string when none was provided."""
    try:
        formula_parts = self.header.formula
    except IndexError:
        # Header parsing can fail to expose a formula.
        formula_parts = 'No formula provided'
    return "".join(str(part) for part in formula_parts)
Returns chemical formula of material from feff.inp file
def new_name(self, template=u"xxx_todo_changeme"):
    """Return an identifier based on *template* that is not yet in use.

    Appends successive numbers drawn from ``self.numbers`` until the name
    no longer collides, records it in ``self.used_names``, and returns it.
    """
    name = template
    while name in self.used_names:
        # Python 3 fix: str()/next() replace the removed unicode()/.next().
        name = template + str(next(self.numbers))
    self.used_names.add(name)
    return name
Return a string suitable for use as an identifier The new name is guaranteed not to conflict with other identifiers.
def as_required_fields(self, fields=None):
    """Set ``required = True`` on this object's copies of the given fields.

    :param fields: iterable of field objects (each carrying a ``name``);
        defaults to an empty list.  (The previous mutable default
        ``fields=[]`` is replaced by the safe ``None`` sentinel.)
    """
    fields = self.filter_fields(fields if fields is not None else [])
    for f in fields:
        # Look up our own field object by name and flag it required.
        f = self.fields[f.name]
        f.required = True
set required to True
def updaten(self, rangesets):
    """Update this rangeset in place with the union of several range sets.

    Items that already are ``set`` instances are merged directly; anything
    else is first converted through ``RangeSet``.
    """
    for item in rangesets:
        self.update(item if isinstance(item, set) else RangeSet(item))
Update a rangeset with the union of itself and several others.
def resolve_path_from_base(path_to_resolve, base_path):
    """Resolve *path_to_resolve* to an absolute path.

    Relative paths are anchored at *base_path*; absolute and user (~)
    paths simply resolve to their absolute form.
    """
    expanded = os.path.expanduser(path_to_resolve)
    joined = os.path.join(base_path, expanded)
    return os.path.abspath(joined)
If path-to_resolve is a relative path, create an absolute path with base_path as the base. If path_to_resolve is an absolute path or a user path (~), just resolve it to an absolute path and return.
def upload_timeline(self, timeline_name, plaso_storage_path):
    """Create a timeline with the given name from a plaso file.

    Args:
        timeline_name (str): name of the timeline.
        plaso_storage_path (str): local path of the plaso file to upload.

    Returns:
        int: ID of the uploaded timeline.

    Raises:
        RuntimeError: when the JSON response from Timesketch cannot be
            decoded.
    """
    resource_url = '{0:s}/upload/'.format(self.api_base_url)
    data = {'name': timeline_name}
    # Context manager closes the file handle even on error (it was
    # previously left open).
    with open(plaso_storage_path, 'rb') as storage_file:
        files = {'file': storage_file}
        response = self.session.post(resource_url, files=files, data=data)
    try:
        response_dict = response.json()
    except ValueError:
        raise RuntimeError(
            'Could not decode JSON response from Timesketch'
            ' (Status {0:d}):\n{1:s}'.format(
                response.status_code, response.content))
    index_id = response_dict['objects'][0]['id']
    return index_id
Create a timeline with the specified name from the given plaso file. Args: timeline_name (str): Name of timeline plaso_storage_path (str): Local path of plaso file to be uploaded Returns: int: ID of uploaded timeline Raises: RuntimeError: When the JSON response from Timesketch cannot be decoded.
def hasBidAsk(self) -> bool:
    """Return True when this ticker has a usable bid and ask: prices set
    (not -1, not NaN) with positive sizes."""
    if self.bid == -1 or isNan(self.bid) or not self.bidSize > 0:
        return False
    return (self.ask != -1 and not isNan(self.ask) and self.askSize > 0)
See if this ticker has a valid bid and ask.
def schedule_exception_in(secs, exception, target):
    """Schedule a greenlet to receive an exception after a number of seconds.

    :param secs: seconds to wait before raising (int or float)
    :param exception: the exception to raise in the greenlet
    :param target: the greenlet that should receive the exception
    """
    # Delegates to the absolute-time variant using the current clock.
    schedule_exception_at(time.time() + secs, exception, target)
schedule a greenlet receive an exception after a number of seconds :param secs: the number of seconds to wait before raising :type secs: int or float :param exception: the exception to raise in the greenlet :type exception: Exception :param target: the greenlet that should receive the exception :type target: greenlet
def walk_level(path, level=1):
    """Like os.walk, but limits recursion depth to *level*.

    Args:
        path (str): root path for the file-tree traversal; a file path
            yields its directory with just that file.
        level (int, optional): depth at which recursion stops.  None means
            unlimited; 0 lists only the root level; 1 goes one level deeper.

    Raises:
        RuntimeError: when *path* is neither a file nor a directory.
    """
    if level is None:
        # None means unlimited depth.
        level = float('inf')
    path = expand_path(path)
    if os.path.isdir(path):
        root_level = path.count(os.path.sep)
        for root, dirs, files in os.walk(path):
            yield root, dirs, files
            # Prune recursion once we are `level` separators below the root
            # (clearing dirs stops os.walk descending further).
            if root.count(os.path.sep) >= root_level + level:
                del dirs[:]
    elif os.path.isfile(path):
        yield os.path.dirname(path), [], [os.path.basename(path)]
    else:
        raise RuntimeError("Can't find a valid folder or file for path {0}".format(repr(path)))
Like os.walk, but takes `level` kwarg that indicates how deep the recursion will go. Notes: TODO: refactor `level`->`depth` References: http://stackoverflow.com/a/234329/623735 Args: path (str): Root path to begin file tree traversal (walk) level (int, optional): Depth of file tree to halt recursion at. None = full recursion to as deep as it goes 0 = nonrecursive, just provide a list of files at the root level of the tree 1 = one level of depth deeper in the tree Examples: >>> root = os.path.dirname(__file__) >>> all((os.path.join(base,d).count('/') == (root.count('/')+1)) ... for (base, dirs, files) in walk_level(root, level=0) for d in dirs) True
def generate_example(config, ext='json'):
    """Generate an example file from a Configuration object.

    Args:
        config: the configuration object the example is based on.
        ext (str): the file extension to render (selects the template,
            e.g. JSON or INI).

    Returns:
        str: the text of the example file.
    """
    template_name = 'example.{0}'.format(ext.lower())
    template = ENV.get_template(template_name)
    return template.render(config=config)
Generate an example file based on the given Configuration object. Args: config (confpy.core.configuration.Configuration): The configuration object on which to base the example. ext (str): The file extension to render. Choices: JSON and INI. Returns: str: The text of the example file.
def GetAllUsers(self, pagination_size=10):
    """Yield all user info from the Gitkit server, fetched in pages.

    Args:
        pagination_size: int, users returned per DownloadAccount request.

    Yields:
        GitkitUser objects, one per account.
    """
    next_page_token, accounts = self.rpc_helper.DownloadAccount(
        None, pagination_size)
    while accounts:
        for account in accounts:
            yield GitkitUser.FromApiResponse(account)
        # Fetch the next page; an empty page ends the iteration.
        next_page_token, accounts = self.rpc_helper.DownloadAccount(
            next_page_token, pagination_size)
Gets all user info from Gitkit server. Args: pagination_size: int, how many users should be returned per request. The account info are retrieved in pagination. Yields: A generator to iterate all users.
def title(self, value: typing.Union[None, str]):
    """Set the project title (initially loaded from ``cauldron.json``).

    Raises:
        RuntimeError: If no project has been loaded yet.
    """
    if self._project:
        self._project.title = value
        return
    raise RuntimeError('Failed to assign title to an unloaded project')
Modifies the title of the project, which is initially loaded from the `cauldron.json` file.
def web_hooks(self, include_global=True):
    """Return all web hooks for this project.

    Args:
        include_global (bool): When True, hooks not bound to any project
            (``project=None``) are included as well.
    """
    from fabric_bolt.web_hooks.models import Hook

    conditions = Q(project=self)
    if include_global:
        conditions = conditions | Q(project=None)
    return Hook.objects.filter(conditions)
Get all web hooks for this project. Includes global hooks.
def status(self):
    """Return the ConnectionStatus describing this Perforce connection."""
    try:
        info = self.run(['info'])
        if info[0]['clientName'] == '*unknown*':
            return ConnectionStatus.INVALID_CLIENT
        # A cheap authenticated command; fails when the ticket is invalid.
        self.run(['user', '-o'])
    except errors.CommandError as err:
        message = str(err.args[0])
        if 'password (P4PASSWD) invalid or unset' in message:
            return ConnectionStatus.NO_AUTH
        if 'Connect to server failed' in message:
            return ConnectionStatus.OFFLINE
    return ConnectionStatus.OK
The status of the connection to perforce
def _get_nadir_pixel(earth_mask, sector):
    """Find the nadir pixel for a full-disc scan.

    Args:
        earth_mask: Mask identifying earth and space pixels.
        sector: Specifies the scanned sector.

    Returns:
        tuple: (nadir_row, nadir_col), or (None, None) for partial scans.
    """
    if sector != FULL_DISC:
        return None, None
    logger.debug('Computing nadir pixel')
    # Nadir sits at the midpoint of the earth disc's bounding box.
    rmin, rmax, cmin, cmax = bbox(earth_mask)
    return rmin + (rmax - rmin) // 2, cmin + (cmax - cmin) // 2
Find the nadir pixel Args: earth_mask: Mask identifying earth and space pixels sector: Specifies the scanned sector Returns: nadir row, nadir column
def rollsingle(self, func, window=20, name=None, fallback=False, align='right', **kwargs):
    """Efficient rolling-window calculation for min/max type functions.

    Args:
        func (str): Base name of the rolling function (e.g. ``'min'``).
        window (int): Rolling window length.
        name (str, optional): Name for the resulting timeseries.
        fallback (bool): Force the pure-python fallback implementation.
        align (str): ``'right'`` aligns each result with its window's end.

    Returns:
        A clone of this instance holding the rolled data.
    """
    func_name = 'roll_{0}'.format(func)
    if fallback:
        rfunc = getattr(lib.fallback, func_name)
    else:
        # Prefer the optimised implementation; fall back when it is missing.
        rfunc = getattr(lib, func_name, None)
        if not rfunc:
            rfunc = getattr(lib.fallback, func_name)
    data = np.array([list(rfunc(serie, window)) for serie in self.series()])
    name = name or self.makename(func, window=window)
    dates = asarray(self.dates())
    desc = settings.desc
    # Trim dates so each output value lines up with its window edge.
    if desc or (align == 'right' and not desc):
        dates = dates[window - 1:]
    else:
        dates = dates[:-window + 1]
    return self.clone(dates, data.transpose(), name=name)
Efficient rolling window calculation for min, max type functions
def _translate_timeperiod(self, timeperiod):
    """Translate the given timeperiod into its grouped equivalent."""
    if self.time_grouping == 1:
        # Grouping factor 1 means "no grouping": return the period as-is.
        return timeperiod

    year, month, day, hour = time_helper.tokenize_timeperiod(timeperiod)
    # Replace the component selected by the time qualifier with its stem.
    if self.time_qualifier == QUALIFIER_HOURLY:
        grouped = self._do_stem_grouping(timeperiod, int(hour))
        return '{0}{1}{2}{3:02d}'.format(year, month, day, grouped)
    if self.time_qualifier == QUALIFIER_DAILY:
        grouped = self._do_stem_grouping(timeperiod, int(day))
        return '{0}{1}{2:02d}{3}'.format(year, month, grouped, hour)
    grouped = self._do_stem_grouping(timeperiod, int(month))
    return '{0}{1:02d}{2}{3}'.format(year, grouped, day, hour)
method translates given timeperiod to the grouped timeperiod
def monthdayscalendar(cls, year, month):
    """Return the month's days grouped into weeks.

    Each week is a list of seven day numbers; the final week may hold
    fewer than seven entries.
    """
    weeks = []
    current = []
    for day in NepCal.itermonthdays(year, month):
        current.append(day)
        if len(current) == 7:
            weeks.append(current)
            current = []
    if current:
        weeks.append(current)
    return weeks
Return a list of the weeks in the month month of the year as full weeks. Weeks are lists of seven day numbers.
def _none_rejecter(validation_callable):
    """Wrap ``validation_callable`` so that ``None`` inputs are rejected.

    The wrapper raises ValueIsNone for ``None`` and otherwise delegates
    to the wrapped callable unchanged.

    :param validation_callable: the validator to wrap
    :return: the wrapping function
    """
    def reject_none(x):
        # Never forward None to the wrapped validator.
        if x is None:
            raise ValueIsNone(wrong_value=x)
        return validation_callable(x)

    reject_none.__name__ = 'reject_none({})'.format(get_callable_name(validation_callable))
    return reject_none
Wraps the given validation callable to reject None values. When a None value is received by the wrapper, it is not passed to the validation_callable; instead a `ValueIsNone` exception is raised. Any other value is passed to the validation_callable as usual. :param validation_callable: the validation callable to wrap :return: the wrapping callable
def _ParseDateTimeValue(self, byte_stream, file_offset):
    """Parse a CUPS IPP RFC2579 date-time value from a byte stream.

    Args:
        byte_stream (bytes): byte stream.
        file_offset (int): offset of the attribute data relative to the start
            of the file-like object.

    Returns:
        dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.

    Raises:
        ParseError: when the RFC2579 date-time value cannot be parsed.
    """
    data_type_map = self._GetDataTypeMap('cups_ipp_datetime_value')
    try:
        value = self._ReadStructureFromByteStream(
            byte_stream, file_offset, data_type_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse datetime value with error: {0!s}'.format(exception))

    return dfdatetime_rfc2579_date_time.RFC2579DateTime(
        rfc2579_date_time_tuple=(
            value.year, value.month, value.day_of_month,
            value.hours, value.minutes, value.seconds, value.deciseconds,
            chr(value.direction_from_utc),
            value.hours_from_utc, value.minutes_from_utc))
Parses a CUPS IPP RFC2579 date-time value from a byte stream. Args: byte_stream (bytes): byte stream. file_offset (int): offset of the attribute data relative to the start of the file-like object. Returns: dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value. Raises: ParseError: when the RFC2579 date-time value cannot be parsed.
def create_roteiro(self):
    """Get an instance of the roteiro services facade."""
    return Roteiro(self.networkapi_url, self.user, self.password,
                   self.user_ldap)
Get an instance of roteiro services facade.
def _fix_valid_indices(cls, valid_indices, insertion_index, dim): indices = np.array(sorted(valid_indices[dim])) slice_index = np.sum(indices <= insertion_index) indices[slice_index:] += 1 indices = np.insert(indices, slice_index, insertion_index + 1) valid_indices[dim] = indices.tolist() return valid_indices
Add indices for H&S inserted elements.
def save(self):
    """Generate a random unused username, then delegate to the parent save."""
    while True:
        candidate = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
        try:
            get_user_model().objects.get(username__iexact=candidate)
        except get_user_model().DoesNotExist:
            # No existing user matches: the candidate is free to use.
            break
    self.cleaned_data['username'] = candidate
    return super(SignupFormOnlyEmail, self).save()
Generate a random username before falling back to parent signup form
def replace_all_tokens(self, token):
    """Re-tag every character currently on the screen with ``token``."""
    buf = self.data_buffer
    for row_index, row in buf.items():
        for col_index, cell in row.items():
            buf[row_index][col_index] = _CHAR_CACHE[cell.char, token]
For all the characters in the screen. Set the token to the given `token`.
def bulk_load_docs(es, docs):
    """Bulk load documents into Elasticsearch.

    Args:
        es: elasticsearch handle
        docs: iterator of doc objects - includes index_name
    """
    try:
        results = elasticsearch.helpers.bulk(es, docs, chunk_size=200)
        loaded, failures = results
        log.debug(f"Elasticsearch documents loaded: {loaded}")
        if failures:
            log.error("Bulk load errors {}".format(results))
    except elasticsearch.ElasticsearchException as e:
        log.error("Indexing error: {}\n".format(e))
Bulk load docs Args: es: elasticsearch handle docs: Iterator of doc objects - includes index_name
def data_it(db_data, user_type):
    """Validate that ``db_data`` is of the user-specified type.

    Args:
        db_data (dict|str|list): The data stored in Redis.
        user_type (str): The type name provided by the user (e.g. 'dict',
            'string', 'null', 'binary'), or None.

    Returns:
        bool: True if the data passed validation.
    """
    data_type = {
        'array': (list),
        'dict': (dict),
        'entity': (dict),
        'list': (list),
        'str': (string_types),
        'string': (string_types),
    }
    if user_type is None:
        if db_data is None:
            return True
    elif user_type.lower() in ['null', 'none']:
        if db_data is None:
            return True
    elif user_type.lower() == 'binary':
        # BUG FIX: was `user_type.lower() in 'binary'` -- a substring test
        # that wrongly matched types like 'b', 'na' or 'r'.
        try:
            base64.b64decode(db_data)
            return True
        except Exception:
            return False
    elif data_type.get(user_type.lower()) is not None:
        if isinstance(db_data, data_type.get(user_type.lower())):
            return True
    return False
Validate data is of the given type. Args: db_data (dict|str|list): The data stored in Redis. user_type (str): The type name provided by the user (or None). Returns: bool: True if the data passed validation.
def _setup_regex(self):
    """Bind the pre-compiled parsing regexes from the module cache.

    These constants are used to parse strings for determining context.
    """
    for attr in ('RE_COMMENTS', 'RE_MODULE', 'RE_TYPE',
                 'RE_EXEC', 'RE_MEMBERS', 'RE_DEPEND'):
        setattr(self, attr, getattr(cache, attr))
Sets up the constant regex strings etc. that can be used to parse the strings for determining context.
def IntersectPath(self, path, intersection):
    """Record the intersection of a field path with this tree.

    Walks the tree along the dotted path: leaving the tree records nothing,
    reaching a leaf early records the whole path, and ending on an interior
    node records the leaves below it.

    Args:
        path: The field path to intersect.
        intersection: The output tree recording the intersection part.
    """
    node = self._root
    for component in path.split('.'):
        if component not in node:
            # Path leaves the tree: the intersection is empty.
            return
        if not node[component]:
            # Hit a leaf before the path ended: the whole path intersects.
            intersection.AddPath(path)
            return
        node = node[component]
    intersection.AddLeafNodes(path, node)
Calculates the intersection part of a field path with this tree. Args: path: The field path to calculates. intersection: The out tree to record the intersection part.
def get_availability(channels, start, end, connection=None, host=None, port=None):
    """Query an NDS2 server for data availability.

    Parameters
    ----------
    channels : `list` of `str`
        channel names to query, mapped to NDS names via :func:`find_channels`
    start : `int`
        GPS start time of query
    end : `int`
        GPS end time of query
    connection : `nds2.connection`, optional
        open NDS2 connection to use for the query
    host : `str`, optional
        name of NDS2 server to query, required if ``connection`` is not given
    port : `int`, optional
        port number on host to use for the NDS2 connection

    Returns
    -------
    segdict : `~gwpy.segments.SegmentListDict`
        dict of ``(name, SegmentList)`` pairs

    Raises
    ------
    ValueError
        if a channel name cannot be mapped uniquely to the NDS database
    """
    from ..segments import (Segment, SegmentList, SegmentListDict)
    connection.set_epoch(start, end)
    # Resolve each request into a unique NDS channel name.
    nds_names = [
        _get_nds2_name(chan) for chan in find_channels(
            channels, epoch=(start, end), connection=connection, unique=True)
    ]
    availability = connection.get_availability(nds_names)
    out = SegmentListDict()
    for name, avail in zip(channels, availability):
        out[name] = SegmentList(
            [Segment(seg.gps_start, seg.gps_stop)
             for seg in avail.simple_list()])
    return out
Query an NDS2 server for data availability Parameters ---------- channels : `list` of `str` list of channel names to query; this list is mapped to NDS channel names using :func:`find_channels`. start : `int` GPS start time of query end : `int` GPS end time of query connection : `nds2.connection`, optional open NDS2 connection to use for query host : `str`, optional name of NDS2 server to query, required if ``connection`` is not given port : `int`, optional port number on host to use for NDS2 connection Returns ------- segdict : `~gwpy.segments.SegmentListDict` dict of ``(name, SegmentList)`` pairs Raises ------ ValueError if the given channel name cannot be mapped uniquely to a name in the NDS server database. See also -------- nds2.connection.get_availability for documentation on the underlying query method
def _post_action(self, action): reward = self.reward(action) self.done = (self.timestep >= self.horizon) and not self.ignore_done return reward, self.done, {}
Do any housekeeping after taking an action.
def virtualbox_host():
    """Install a VirtualBox host system.

    More Infos:
     * overview: https://wiki.ubuntuusers.de/VirtualBox/
     * installation: https://wiki.ubuntuusers.de/VirtualBox/Installation/
    """
    # Optionally remove an existing virtualbox-dkms before (re)installing.
    if query_yes_no(question='Uninstall virtualbox-dkms?', default='yes'):
        run('sudo apt-get remove virtualbox-dkms')
    install_packages([
        'virtualbox',
        'virtualbox-qt',
        'virtualbox-dkms',
        'virtualbox-guest-dkms',
        'virtualbox-guest-additions-iso',
    ])
    users = [env.user]
    for username in users:
        # NOTE: `flo` interpolates the local variable `username` into the
        # command string; do not rename it.
        run(flo('sudo adduser {username} vboxusers'))
Install a VirtualBox host system. More Infos: * overview: https://wiki.ubuntuusers.de/VirtualBox/ * installation: https://wiki.ubuntuusers.de/VirtualBox/Installation/
def newsnr_threshold(self, threshold):
    """Remove events whose newsnr is smaller than ``threshold``.

    Raises:
        RuntimeError: if the chi-square test is not enabled.
    """
    if not self.opt.chisq_bins:
        raise RuntimeError('Chi-square test must be enabled in order to '
                           'use newsnr threshold')
    below = [
        index for index, event in enumerate(self.events)
        if ranking.newsnr(
            abs(event['snr']), event['chisq'] / event['chisq_dof']) < threshold
    ]
    self.events = numpy.delete(self.events, below)
Remove events with newsnr smaller than given threshold
def remove_prefix(text, prefix):
    """Remove ``prefix`` from the start of ``text`` if it is present.

    The previous implementation used ``str.rpartition``, which stripped
    everything up to the *last* occurrence of ``prefix`` anywhere in the
    string (and raised ValueError for an empty prefix).

    >>> remove_prefix('underwhelming performance', 'underwhelming ')
    'performance'
    >>> remove_prefix('something special', 'sample')
    'something special'
    """
    if prefix and text.startswith(prefix):
        return text[len(prefix):]
    return text
Remove the prefix from the text if it exists. >>> remove_prefix('underwhelming performance', 'underwhelming ') 'performance' >>> remove_prefix('something special', 'sample') 'something special'
def get_volumes_for_instance(self, arg, device=None):
    """Return all EC2 Volume objects attached to an instance.

    Args:
        arg: instance name or ID
        device: optionally restrict to the (single) volume attached as
            this device

    Returns:
        the matching Volume objects
    """
    instance = self.get(arg)
    query = {'attachment.instance-id': instance.id}
    if device is not None:
        query['attachment.device'] = device
    return self.get_all_volumes(filters=query)
Return all EC2 Volume objects attached to ``arg`` instance name or ID. May specify ``device`` to limit to the (single) volume attached as that device.
def _get_indent(self, node): lineno = node.lineno if lineno > len(self._lines): return -1 wsindent = self._wsregexp.match(self._lines[lineno - 1]) return len(wsindent.group(1))
Get node indentation level.
def value_net(rng_key, batch_observations_shape, num_actions, bottom_layers=None):
    """Build and initialise a value network.

    Args:
        rng_key: PRNG key used for parameter initialisation.
        batch_observations_shape: shape of a batch of observations.
        num_actions: unused; kept for signature compatibility.
        bottom_layers: optional list of layers to stack below the value head.
            The caller's list is no longer mutated.

    Returns:
        tuple: (initialised parameters, the network object).
    """
    del num_actions
    # BUG FIX: the old code extended the caller's list in place, appending
    # an extra Dense(1) head on every call made with the same list.
    bottom_layers = list(bottom_layers or [])
    bottom_layers.append(layers.Dense(1))
    net = layers.Serial(*bottom_layers)
    return net.initialize(batch_observations_shape, rng_key), net
A value net function.
def get_service_account_token(request, service_account='default'):
    """Get the OAuth 2.0 access token for a service account.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        service_account (str): The string 'default' or a service account
            email address.

    Returns:
        Union[str, datetime]: The access token and its expiration.

    Raises:
        google.auth.exceptions.TransportError: if an error occurred while
            retrieving metadata.
    """
    path = 'instance/service-accounts/{0}/token'.format(service_account)
    token_json = get(request, path)
    expiry = _helpers.utcnow() + datetime.timedelta(
        seconds=token_json['expires_in'])
    return token_json['access_token'], expiry
Get the OAuth 2.0 access token for a service account. Args: request (google.auth.transport.Request): A callable used to make HTTP requests. service_account (str): The string 'default' or a service account email address. The determines which service account for which to acquire an access token. Returns: Union[str, datetime]: The access token and its expiration. Raises: google.auth.exceptions.TransportError: if an error occurred while retrieving metadata.
def text(self):
    """The text displayed on the block.

    Inserts render as ``"%s"`` and literal percent signs as ``"%%"``,
    eg. ``'say %s for %s secs'``.
    """
    rendered = []
    for part in self.parts:
        if isinstance(part, Insert):
            rendered.append("%s")
        elif part == "%":
            rendered.append("%%")
        else:
            rendered.append(part)
    return "".join(rendered)
The text displayed on the block. String containing ``"%s"`` in place of inserts. eg. ``'say %s for %s secs'``
def user_disable_throw_rest_endpoint(self, username, url='rest/scriptrunner/latest/custom/disableUser', param='userName'):
    """Disable a user through a custom ScriptRunner REST endpoint."""
    full_url = "{}?{}={}".format(url, param, username)
    return self.get(path=full_url)
Disable a user via a custom ScriptRunner REST endpoint.
def validate(user_input, ret_errs=False, print_errs=False):
    """Check whether ``user_input`` contains a valid STIX pattern.

    Args:
        user_input: the pattern text to validate.
        ret_errs: also return the error list when True.
        print_errs: print each error message when True.

    Returns:
        bool, or (bool, list of errors) when ``ret_errs`` is True.
    """
    errs = run_validator(user_input)
    passed = not errs
    if print_errs:
        for err in errs:
            print(err)
    return (passed, errs) if ret_errs else passed
Wrapper for run_validator function that returns True if the user_input contains a valid STIX pattern or False otherwise. The error messages may also be returned or printed based upon the ret_errs and print_errs arg values.
def from_json_and_lambdas(cls, file: str, lambdas):
    """Build a GrFN from a JSON file and its lambdas.

    Args:
        cls: The class variable for object creation.
        file: Filename of a GrFN JSON file.
        lambdas: The lambdas companion for the JSON spec.

    Returns:
        type: A GroundedFunctionNetwork object.
    """
    with open(file, "r") as json_file:
        spec = json.load(json_file)
    return cls.from_dict(spec, lambdas)
Builds a GrFN from a JSON object. Args: cls: The class variable for object creation. file: Filename of a GrFN JSON file. Returns: type: A GroundedFunctionNetwork object.
def _fetchAzureAccountKey(accountName): try: return os.environ['AZURE_ACCOUNT_KEY_' + accountName] except KeyError: try: return os.environ['AZURE_ACCOUNT_KEY'] except KeyError: configParser = RawConfigParser() configParser.read(os.path.expanduser(credential_file_path)) try: return configParser.get('AzureStorageCredentials', accountName) except NoOptionError: raise RuntimeError("No account key found for '%s', please provide it in '%s'" % (accountName, credential_file_path))
Find the account key for a given Azure storage account. The account key is taken from the AZURE_ACCOUNT_KEY_<account> environment variable if it exists, then from plain AZURE_ACCOUNT_KEY, and then from looking in the file ~/.toilAzureCredentials. That file has format: [AzureStorageCredentials] accountName1=ACCOUNTKEY1== accountName2=ACCOUNTKEY2==
def Failed(self):
    """Register a failed request.

    Returns:
        Time interval to wait before retrying (in seconds).
    """
    wait = self._current_interval_sec
    # Grow the interval geometrically, capped at max_interval_sec.
    self._current_interval_sec = min(
        self.max_interval_sec, wait * self.multiplier)
    return wait
Indicates that a request has failed. Returns: Time interval to wait before retrying (in seconds).
def populate_times(self):
    """Populate the measurement's meta-data times when they are present.

    Each timestamp found in meta_data is converted to a tz-aware UTC
    datetime and stored on the matching attribute.
    """
    for attr in ("stop_time", "creation_time", "start_time"):
        timestamp = self.meta_data.get(attr)
        if timestamp:
            naive = datetime.utcfromtimestamp(timestamp)
            setattr(self, attr, naive.replace(tzinfo=tzutc()))
Populates all different meta data times that comes with measurement if they are present.
def get_thread(self, thread_id, update_if_cached=True, raise_404=False):
    """Get a thread from 4chan via the 4chan API, using the cache if possible.

    Args:
        thread_id (int): Thread ID
        update_if_cached (bool): Whether a cached thread should be refreshed
            before being returned
        raise_404 (bool): Raise an Exception if the thread has 404'd

    Returns:
        basc_py4chan.Thread: the thread, or None on a non-raising failure.
    """
    cached = self._thread_cache.get(thread_id)
    if cached:
        if update_if_cached:
            cached.update()
        return cached

    res = self._requests_session.get(
        self._url.thread_api_url(thread_id=thread_id))
    if raise_404:
        res.raise_for_status()
    elif not res.ok:
        return None

    thread = Thread._from_request(self, res, thread_id)
    self._thread_cache[thread_id] = thread
    return thread
Get a thread from 4chan via 4chan API. Args: thread_id (int): Thread ID update_if_cached (bool): Whether the thread should be updated if it's already in our cache raise_404 (bool): Raise an Exception if thread has 404'd Returns: :class:`basc_py4chan.Thread`: Thread object