code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def from_model(cls, model_instance, default_value=False, **kwargs):
    """Wrapper for the model's get_attribute_filter.

    Builds an AttributeFilter from *model_instance*, seeding every attribute
    with *default_value*; keyword arguments override individual attribute
    flags and must name attributes that exist on the filter.

    Raises TypeError for non-DataCollection instances and KeyError for
    unknown attribute names.
    """
    if not isinstance(model_instance, DataCollection):
        raise TypeError("model_instance must be a subclass of \ prestans.types.DataCollection, %s given" % (model_instance.__class__.__name__))
    elif isinstance(model_instance, Array) and model_instance.is_scalar:
        # Scalar arrays have no sub-attributes to filter.
        return AttributeFilter(is_array_scalar=True)
    attribute_filter_instance = model_instance.get_attribute_filter(default_value)
    for name, value in iter(kwargs.items()):
        if name in attribute_filter_instance:
            setattr(attribute_filter_instance, name, value)
        else:
            raise KeyError(name)
    return attribute_filter_instance
wrapper for Model's get_attribute_filter
def write_human(self, buffer_):
    """Send *buffer_* one character at a time with randomized delays.

    Emulates human typing speed: each character is followed by a pause drawn
    from a Gaussian (mean 80 ms, sd 20 ms).  IAC bytes are doubled per the
    telnet protocol before sending.
    """
    if self.IAC in buffer_:
        buffer_ = buffer_.replace(self.IAC, self.IAC + self.IAC)
    self.msg("send %r", buffer_)
    for char in buffer_:
        delta = random.gauss(80, 20)
        self.sock.sendall(char)
        # BUG FIX: gauss() can return a negative sample, and time.sleep()
        # raises ValueError on negative input — clamp at zero.
        time.sleep(max(delta, 0) / 1000.0)
Emulates human typing speed
def remove_default_content(portal):
    """Delete the stock Plone content objects listed in CONTENTS_TO_DELETE.

    Only ids actually present in the portal are passed on, so the call is
    safe to repeat.
    """
    logger.info("*** Delete Default Content ***")
    object_ids = portal.objectIds()
    # BUG FIX: filter() returns a lazy iterator on Python 3, while
    # manage_delObjects expects a concrete sequence — build a list instead.
    delete_ids = [oid for oid in CONTENTS_TO_DELETE if oid in object_ids]
    portal.manage_delObjects(ids=delete_ids)
Remove default Plone contents
def _initSymbols(ptc):
    """Initialize symbols and single character constants.

    Builds the lists of recognized am/pm spellings on the parser constants
    object *ptc* from its locale's meridian strings.
    """
    # Placeholders so both lists exist even when the locale defines nothing.
    ptc.am = ['', '']
    ptc.pm = ['', '']
    for idx, xm in enumerate(ptc.locale.meridian[:2]):
        # idx 0 -> 'am', idx 1 -> 'pm'
        target = ['am', 'pm'][idx]
        setattr(ptc, target, [xm])
        target = getattr(ptc, target)
        if xm:
            lxm = xm.lower()
            # Add variants: first letter, dotted ("a.m."), lowercase,
            # lowercase first letter, dotted lowercase.
            # NOTE(review): '{0}.{1}.'.format(*xm) uses the first two
            # characters of the meridian string — assumes it is at least two
            # characters long; verify for all supported locales.
            target.extend((xm[0], '{0}.{1}.'.format(*xm), lxm, lxm[0], '{0}.{1}.'.format(*lxm)))
Initialize symbols and single character constants.
def _repr_html_(self, **kwargs): if self._parent is None: self.add_to(Figure()) out = self._parent._repr_html_(**kwargs) self._parent = None else: out = self._parent._repr_html_(**kwargs) return out
Displays the HTML Map in a Jupyter notebook.
def _get_decoder(self, units, vocab_size, embed, prefix):
    """Construct a decoder for the masked language model task.

    Dense -> GELU -> LayerNorm -> Dense(vocab), with the final projection
    sharing (tying) its parameters with the word embedding *embed*.
    """
    with self.name_scope():
        decoder = nn.HybridSequential(prefix=prefix)
        decoder.add(nn.Dense(units, flatten=False))
        decoder.add(GELU())
        decoder.add(BERTLayerNorm(in_channels=units))
        # Reuse the embedding's parameters so the output projection weights
        # are tied to the input embedding.
        decoder.add(nn.Dense(vocab_size, flatten=False, params=embed.collect_params()))
        assert decoder[3].weight == list(embed.collect_params().values())[0], \
            'The weights of word embedding are not tied with those of decoder'
    return decoder
Construct a decoder for the masked language model task
def default(self, obj):
    """JSON-serialize numpy arrays, scipy sparse matrices and plain objects.

    ndarrays and sparse matrices become dicts carrying base64-encoded data
    plus dtype/shape (sparse also gets indices/indptr); other objects fall
    back to their __dict__, then to the default encoder.
    """
    if isinstance(obj, np.ndarray):
        payload = base64.b64encode(obj.data).decode('utf-8')
        return dict(__ndarray__=payload, dtype=str(obj.dtype), shape=obj.shape)
    if sps.issparse(obj):
        payload = base64.b64encode(obj.data).decode('utf-8')
        return dict(__ndarray__=payload, dtype=str(obj.dtype), shape=obj.shape,
                    indices=obj.indices, indptr=obj.indptr)
    if hasattr(obj, '__dict__'):
        return obj.__dict__
    return json.JSONEncoder.default(self, obj)
if input object is a ndarray it will be converted into a dict holding dtype, shape and the data base64 encoded
def setCredentials(self, username, password):
    """Sets authentication credentials for accessing the remote gateway.

    NOTE(review): both arguments are .decode()'d, so bytes input is
    assumed — confirm callers never pass str on Python 3.
    """
    creds = dict(userid=username.decode('utf-8'),
                 password=password.decode('utf-8'))
    self.addHeader('Credentials', creds, True)
Sets authentication credentials for accessing the remote gateway.
def required(self, method, _dict, require):
    """Ensure the required items are in the dictionary.

    Raises LunrError naming the first missing key, if any.
    """
    missing = [key for key in require if key not in _dict]
    if missing:
        raise LunrError("'%s' is required argument for method '%s'"
                        % (missing[0], method))
Ensure the required items are in the dictionary
def Get(self, interface_name, property_name):
    """Standard D-Bus API for getting a property value."""
    self.log('Get %s.%s' % (interface_name, property_name))
    interface_name = interface_name or self.interface
    try:
        return self.GetAll(interface_name)[property_name]
    except KeyError:
        raise dbus.exceptions.DBusException(
            'no such property ' + property_name,
            name=self.interface + '.UnknownProperty')
Standard D-Bus API for getting a property value
def consult_pre_response_hook(self, item_session: ItemSession) -> Actions:
    """Return scripting action when a response begins.

    Falls back to Actions.NORMAL when no pre-response hook is connected.
    """
    try:
        return self.hook_dispatcher.call(
            PluginFunctions.handle_pre_response, item_session)
    except HookDisconnected:
        pass
    return Actions.NORMAL
Return scripting action when a response begins.
def _set_values(self):
    """Populate this instance's attributes from self._data.

    Nested dicts are resolved, where possible, to instances of the related
    model found by walking '__'-separated attribute paths on self._model.
    """
    for k, v in self._data.items():
        if isinstance(v, dict):
            try:
                # Walk the double-underscore path to find the related model
                # class (e.g. 'author__profile' -> Profile).
                rel_model = self._model
                for attr in k.split('__'):
                    rel_model = getattr(rel_model, attr).field.related_model
            except AttributeError:
                # Not a relation path; keep the raw dict value.
                pass
            else:
                # Flatten the lookup path into a single attribute name.
                k = k.replace('__', '_')
                if 'id' in v and v['id'] is None:
                    # A null id means the relation is absent.
                    v = None
                else:
                    v = rel_model(**v)
        setattr(self, k, v)
Populate instance attributes from the given data, resolving nested dicts to related model instances where possible.
def _setup_ssh(self):
    """Initializes the connection to the server via SSH.

    Lazily imports paramiko on first use, then opens an SSHClient that
    auto-accepts unknown host keys and authenticates with self.pkey.
    """
    global paramiko
    # BUG FIX: the original tested ``paramiko is none`` (lowercase), which
    # raised NameError instead of triggering the lazy import.
    if paramiko is None:
        import paramiko
    self.ssh = paramiko.SSHClient()
    self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    self.ssh.connect(self.server, username=self.user, pkey=self.pkey)
Initializes the connection to the server via SSH.
def update(self, data):
    """Update known data with newly provided data.

    Each record must carry a 'label' key; numeric-looking string values are
    coerced to int.  Records merge into Handler.ALL_VERS_DATA and
    self._updated is flagged whenever anything actually changed.
    """
    if not isinstance(data, list):
        data = [data]
    master = Handler.ALL_VERS_DATA
    for record in data:
        # Coerce numeric strings to ints; leave everything else as-is.
        for k, v in iteritems(record):
            try:
                record[k] = int(v)
            except ValueError:
                record[k] = v
        try:
            label = record["label"]
        except KeyError:
            # BUG FIX: the original formatted this message from an undefined
            # name ``kwargs``, raising NameError instead of the intended
            # ValueError — format from ``record`` instead.
            raise ValueError("Must provide a valid label argument. Given:%s%s" % (
                os.linesep,
                ("%s " % (os.linesep)).join(
                    ["%15s:%s" % (k, v) for k, v in iteritems(record)])))
        try:
            masterLabel = master[label]
        except KeyError:
            # First sighting of this label: store the whole record.
            master[label] = record
            self._updated = True
            continue
        # Merge into the existing record on the first differing value.
        for k, v in iteritems(record):
            try:
                if masterLabel[k] == v:
                    continue
            except KeyError:
                pass
            self._updated = True
            try:
                master[label].update(record)
            except KeyError:
                break
Update known data with newly provided data.
def submit_form_action(step, url):
    """Submit the form having given action URL."""
    xpath = str('//form[@action="%s"]' % url)
    world.browser.find_element_by_xpath(xpath).submit()
Submit the form having given action URL.
def _run_custom_data_engine(self, start_id_obj, end_id_obj, start_timeperiod, end_timeperiod):
    """Fine-tuned data engine iterating a MongoDB collection in batches.

    Repeatedly opens a cursor over [start_id_obj : end_id_obj] within the
    given timeperiod window, processing documents and resuming from the last
    seen _id, until the cursor yields nothing (MongoDB legacy batching).
    """
    collection_name = context.process_context[self.process_name].source
    iteration = 0
    while True:
        cursor = self.ds.cursor_fine(collection_name,
                                     start_id_obj,
                                     end_id_obj,
                                     iteration,
                                     start_timeperiod,
                                     end_timeperiod)
        # Empty range on the very first pass -> nothing to do at all.
        if iteration == 0 and cursor.count(with_limit_and_skip=True) == 0:
            msg = 'No entries in {0} at range [{1} : {2}]'.format(collection_name, start_id_obj, end_id_obj)
            self.logger.warning(msg)
            break
        # Track the last processed _id so the next iteration resumes there;
        # None after the loop means the cursor yielded nothing.
        start_id_obj = None
        for document in cursor:
            start_id_obj = document['_id']
            self._process_single_document(document)
            self.performance_tracker.increment_success()
        if start_id_obj is None:
            break
        iteration += 1
    self._cursor_exploited()
    msg = 'Cursor exploited after {0} iterations'.format(iteration)
    self.logger.info(msg)
fine-tuned data engine. MongoDB legacy
def copy_with_new_atts(self, **attributes):
    """Returns a new FmtStr with the same content but new formatting."""
    new_chunks = [Chunk(bfs.s, bfs.atts.extend(attributes))
                  for bfs in self.chunks]
    return FmtStr(*new_chunks)
Returns a new FmtStr with the same content but new formatting
def str2int(self, str_value):
    """Conversion class name string => integer label.

    Uses the explicit mapping when present; otherwise the label itself must
    be a stringified integer within [0, num_classes).
    """
    str_value = tf.compat.as_text(str_value)
    if self._str2int:
        return self._str2int[str_value]
    try:
        int_value = int(str_value)
    except ValueError:
        int_value = None
    if int_value is None or not 0 <= int_value < self._num_classes:
        raise ValueError("Invalid string class label %s" % str_value)
    return int_value
Conversion class name string => integer.
def list_healthchecks(self, service_id, version_number):
    """List all of the healthchecks for a particular service and version."""
    url = "/service/%s/version/%d/healthcheck" % (service_id, version_number)
    content = self._fetch(url)
    return map(lambda x: FastlyHealthCheck(self, x), content)
List all of the healthchecks for a particular service and version.
def transform(self, offset, newseqid=None):
    """Shift every contained feature's coordinates by *offset*.

    When *newseqid* is given, each feature's seqid is relabeled as well.
    """
    for feat in self:
        feat._range.transform(offset)
        if newseqid is not None:
            feat.seqid = newseqid
Transform the feature's coordinates by the given offset.
def H_acceptor_count(mol):
    """Hydrogen bond acceptor count."""
    mol.require("Valence")
    total = 0
    for _, atom in mol.atoms_iter():
        if atom.H_acceptor:
            total += 1
    return total
Hydrogen bond acceptor count
def sqlite_by_object(self, destination, progress):
    """Dump this object into a new SQLite database at *destination*.

    Executes each dumped script chunk in turn; probably not very fast.
    """
    db = SQLiteDatabase(destination)
    db.create()
    for chunk in self.sqlite_dump_string(progress):
        db.cursor.executescript(chunk)
    db.close()
This is probably not very fast.
def suppressed(self, obj):
    """Suppress calls within this context to avoid feedback loops.

    Sets obj.__suppressed__[self.name] for the duration of the with-block.
    BUG FIX: the flag is now cleared in a ``finally`` so an exception raised
    inside the block can no longer leave suppression permanently enabled.
    """
    obj.__suppressed__[self.name] = True
    try:
        yield
    finally:
        obj.__suppressed__[self.name] = False
Suppress calls within this context to avoid feedback loops
def remove_entities(status, entitylist):
    """Strip the text spans of the given entity types from a status.

    Works with both status objects (attribute access) and plain dicts.
    """
    try:
        entities = status.entities
        text = status.text
    except AttributeError:
        entities = status.get('entities', dict())
        text = status['text']
    spans = []
    for etype, entval in list(entities.items()):
        if etype in entitylist:
            spans.extend(ent['indices'] for ent in entval)
    # Cut from the end of the text so earlier indices stay valid.
    for start, end in sorted(spans, key=lambda span: span[0], reverse=True):
        text = text[:start] + text[end:]
    return text
Remove entities for a list of items.
def create_content(self):
    """Render the template, apply options on it, and save it to the cache."""
    self.render_node()
    if self.options.compress_spaces:
        # Collapse runs of whitespace into single spaces.
        self.content = self.RE_SPACELESS.sub(' ', self.content)
    if self.options.compress:
        to_cache = self.encode_content()
    else:
        to_cache = self.content
    # Tag the payload with the template version before storing.
    to_cache = self.join_content_version(to_cache)
    try:
        self.cache_set(to_cache)
    except Exception:
        # Cache failures are non-fatal, but re-raised when template
        # debugging is turned on.
        if is_template_debug_activated():
            raise
        logger.exception('Error when saving the cached template fragment')
Render the template, apply options on it, and save it to the cache.
def icnr(x, scale=2, init=nn.init.kaiming_normal_):
    """ICNR init of `x`, with `scale` and `init` function.

    Initializes a (ni, nf, h, w) weight tensor so that a sub-pixel
    convolution followed by PixelShuffle starts out as nearest-neighbour
    upsampling: a smaller kernel is initialized and each filter is
    replicated scale**2 times.
    """
    ni, nf, h, w = x.shape
    ni2 = int(ni / (scale ** 2))
    kernel = init(torch.zeros([ni2, nf, h, w])).transpose(0, 1)
    kernel = kernel.contiguous().view(ni2, nf, -1)
    kernel = kernel.repeat(1, 1, scale ** 2)
    kernel = kernel.contiguous().view([nf, ni, h, w]).transpose(0, 1)
    x.data.copy_(kernel)
ICNR init of `x`, with `scale` and `init` function.
def call(subcommand, args):
    """Call a subcommand passing the args."""
    args['<napp>'] = parse_napps(args['<napp>'])
    handler = getattr(NAppsAPI, subcommand)
    handler(args)
Call a subcommand passing the args.
def create_permissao_administrativa(self):
    """Get an instance of permissao_administrativa services facade."""
    return PermissaoAdministrativa(self.networkapi_url,
                                   self.user,
                                   self.password,
                                   self.user_ldap)
Get an instance of permissao_administrativa services facade.
def start(self):
    """Start the timer: record the epoch and reset the display to 0 s."""
    self.startTime = time.time()
    label = '{0:<d} s'.format(0)
    self.configure(text=label)
    self.update()
Starts the timer from zero
def select_extended(cat_table):
    """Select only rows representing extended sources from a catalog table.

    Prefers the 'Extended_Source_Name' column (non-blank name => extended);
    falls back to a boolean 'Extended' column when that is absent.
    """
    try:
        names = cat_table['Extended_Source_Name'].data
    except KeyError:
        return cat_table['Extended']
    return np.array([len(name.strip()) > 0 for name in names], bool)
Select only rows representing extended sources from a catalog table
def MakeBuildDirectory(self):
    """Prepares the build directory.

    Reads build/work paths from the config and wipes both.
    """
    get_option = config.CONFIG.Get
    self.build_dir = get_option("PyInstaller.build_dir", context=self.context)
    self.work_path = get_option("PyInstaller.workpath_dir", context=self.context)
    self.CleanDirectory(self.build_dir)
    self.CleanDirectory(self.work_path)
Prepares the build directory.
def add_sqlvm_to_group(instance, sql_virtual_machine_group_resource_id, sql_service_account_password, cluster_operator_account_password, cluster_bootstrap_account_password=None):
    """Add a SQL virtual machine to a SQL virtual machine group.

    Validates the group resource id, then attaches the group id and WSFC
    domain credentials to the instance and returns it.
    """
    if not is_valid_resource_id(sql_virtual_machine_group_resource_id):
        raise CLIError("Invalid SQL virtual machine group resource id.")
    instance.sql_virtual_machine_group_resource_id = \
        sql_virtual_machine_group_resource_id
    credentials = WsfcDomainCredentials(
        cluster_bootstrap_account_password=cluster_bootstrap_account_password,
        cluster_operator_account_password=cluster_operator_account_password,
        sql_service_account_password=sql_service_account_password)
    instance.wsfc_domain_credentials = credentials
    return instance
Add a SQL virtual machine to a SQL virtual machine group.
def key_callback(self, window, key, scancode, action, mods):
    """Press ESCAPE to quit the application."""
    escape_pressed = key == glfw.KEY_ESCAPE and action == glfw.PRESS
    if escape_pressed:
        glfw.SetWindowShouldClose(self.window, True)
Press ESCAPE to quit the application.
def match_regex(pattern: str, message=None, *, flags=0, fullmatch: bool = False) -> Filter_T:
    """Validate any string object to ensure it matches a given pattern."""
    compiled = re.compile(pattern, flags)

    def validate(value):
        matcher = re.fullmatch if fullmatch else re.match
        if not matcher(compiled, value):
            _raise_failure(message)
        return value

    return validate
Validate any string object to ensure it matches a given pattern.
def _src_media_url_for_video(self, video): src_url = None best_height = 0 best_source = None video_sources = self.bc_client.get_video_sources(video['id']) for source in video_sources: height = source.get('height', 0) codec = source.get('codec') if source.get('src') and codec and codec.upper() == 'H264' and height <= 1080 and height > best_height: best_source = source if best_source is not None: src_url = best_source['src'] return src_url
Get the url for the video media that we can send to Clarify
def show(self):
    """Plot the result of the simulation once it has been run."""
    from matplotlib import pyplot as plt
    if self.already_run:
        # One voltage trace per recorded reference.
        for ref in self.volts.keys():
            plt.plot(self.t, self.volts[ref], label=ref)
        plt.title("Simulation voltage vs time")
        plt.legend()
        plt.xlabel("Time [ms]")
        plt.ylabel("Voltage [mV]")
    else:
        pynml.print_comment("First you have to 'go()' the simulation.", True)
    # NOTE(review): plt.show() is also called when nothing was plotted.
    plt.show()
Plot the result of the simulation once it has been run.
def _BlobToChunks(blob_id, blob):
    """Splits a Blob into chunks of size BLOB_CHUNK_SIZE.

    An empty blob still yields one (empty) chunk.
    """
    offsets = list(range(0, len(blob), BLOB_CHUNK_SIZE)) or [0]
    return [{
        "blob_id": blob_id,
        "chunk_index": index,
        "blob_chunk": blob[offset:offset + BLOB_CHUNK_SIZE],
    } for index, offset in enumerate(offsets)]
Splits a Blob into chunks of size BLOB_CHUNK_SIZE.
def load_configuration(self) -> None:
    """Read the configuration from a configuration file.

    Uses self.config_file when set, otherwise self.default_config_file.
    """
    path = self.config_file or self.default_config_file
    self.config = ConfigParser()
    self.config.read(path)
Read the configuration from a configuration file
def getStormQuery(self, text):
    """Parse storm query text and return an initialized Query object."""
    parsed = s_syntax.Parser(text).query()
    parsed.init(self)
    return parsed
Parse storm query text and return a Query object.
def _point_in_bbox(point, bounds): return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2] or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
Check whether the point is inside the bounding box.
def delete(self, path, data=None, params=None):
    """Generic DELETE with auth headers.

    Returns the response on 204; any other status is logged and raised via
    raise_for_status().
    """
    uri = self.config.get_target() + path
    headers = {'Authorization': self.config.get_access_token()}
    logging.debug("URI=DELETE " + str(uri))
    logging.debug("HEADERS=" + str(headers))
    response = self.session.delete(uri, headers=headers, params=params,
                                   data=json.dumps(data))
    if response.status_code == 204:
        return response
    logging.debug("STATUS=" + str(response.status_code))
    logging.debug("CONTENT=" + str(response.content))
    response.raise_for_status()
Generic DELETE with headers
def _post_transition(self, result, *args, **kwargs):
    """Performs post-transition actions by running the registered hooks."""
    hooks = self._filter_hooks(HOOK_AFTER, HOOK_ON_ENTER)
    for hook in hooks:
        hook(self.instance, result, *args, **kwargs)
Performs post-transition actions.
def setup(app):
    """Called at Sphinx initialization; wires apidoc and transforms."""
    app.connect('builder-inited', RunSphinxAPIDoc)
    recommonmark_defaults = {'enable_auto_doc_ref': False}
    app.add_config_value('recommonmark_config', recommonmark_defaults, True)
    app.add_transform(AutoStructify)
    app.add_transform(ProcessLink)
Called at Sphinx initialization.
def _converged(self, X): if len(self.responsibilities) < 2: return False diff = np.linalg.norm(self.responsibilities[-1] - self.responsibilities[-2]) return diff <= self.tolerance
Convergence if || likelihood - last_likelihood || < tolerance
def _coolest(self):
    """Drive the group temperature as cool as possible."""
    n_steps = steps(self.temperature, 1.0, self.command_set.temperature_steps)
    for _ in range(n_steps):
        self._cooler()
Group temperature as cool as possible.
def _init_base_objects(self, ssl_version: OpenSslVersionEnum, underlying_socket: Optional[socket.socket]) -> None:
    """Setup the socket and SSL_CTX objects."""
    self._sock = underlying_socket
    self._ssl_version = ssl_version
    # One SSL_CTX per connection, pinned to the requested protocol version.
    self._ssl_ctx = self._NASSL_MODULE.SSL_CTX(ssl_version.value)
    self._is_handshake_completed = False
Setup the socket and SSL_CTX objects.
def write(self, fptr):
    """Write a Label box to file."""
    payload = self.label.encode()
    # Box length = 8-byte header (length + type) plus the label payload.
    fptr.write(struct.pack('>I4s', 8 + len(payload), b'lbl '))
    fptr.write(payload)
Write a Label box to file.
def parse_compound_file(f, context=None):
    """Iterate over the compound entries in the given tab-separated file.

    Skips the header line; yields CompoundEntry objects with decoded names
    and normalized formulas ('X*n' repeat notation becomes '(X)n').
    """
    f.readline()  # skip header
    for lineno, row in enumerate(csv.reader(f, delimiter='\t')):
        compound_id, names, formula = row[:3]
        names = (decode_name(name) for name in names.split(',<br>'))
        repeat = re.match(r'^(.*)\*(\d*)$', formula)
        if repeat is not None:
            base, count = repeat.group(1), repeat.group(2)
            formula = '({}){}'.format(base, count if count != '' else 'n')
        formula = formula.strip()
        if formula in ('', 'noformula'):
            formula = None
        yield CompoundEntry(compound_id, names, formula,
                            filemark=FileMark(context, lineno, 0))
Iterate over the compound entries in the given file
def packb(obj, **kwargs):
    """Wrap msgpack.packb, setting use_bin_type=True by default."""
    if 'use_bin_type' not in kwargs:
        kwargs['use_bin_type'] = True
    return msgpack.packb(obj, **kwargs)
wrap msgpack.packb, setting use_bin_type=True by default
def levenberg_marquardt(self, start_x=None, damping=1.0e-3, tolerance=1.0e-6):
    """Optimise value of x using Levenberg-Marquardt.

    When no starting point is supplied, seed from the analytical fit.
    NOTE(review): the ``damping`` argument is accepted but never used.
    """
    if start_x is None:
        start_x = self._analytical_fitter.fit(self._c)
    return optimise_levenberg_marquardt(start_x, self._a, self._c, tolerance)
Optimise value of x using levenberg marquardt
def argset(name, *args, **kwargs):
    """Decorator to add sets of required mutually exclusive args to subcommands."""
    def decorator(func):
        argsets = getattr(func, '_subcommand_argsets', None)
        if argsets is None:
            argsets = func._subcommand_argsets = {}
        argsets.setdefault(name, []).append((args, kwargs))
        return func
    return decorator
Decorator to add sets of required mutually exclusive args to subcommands.
def add(self, *files):
    """Add one or more files to the index running git-add.

    Raises FileNotFoundError for a missing path.  BUG FIX: any other
    git-add failure is now re-raised instead of being silently swallowed
    (the original returned normally when all paths existed).
    """
    try:
        _run_command(("git", "add") + files)
    except CalledProcessError:
        for f in files:
            if not Path(f).exists():
                raise FileNotFoundError(f"No such file or directory: {f}")
        raise
Add one or more files to the index running git-add.
def watch_prefix(self, key_prefix, **kwargs):
    """The same as ``watch``, but watches a range of keys with a prefix."""
    kwargs['range_end'] = _increment_last_byte(key_prefix)
    return self.watch(key_prefix, **kwargs)
The same as ``watch``, but watches a range of keys with a prefix.
def to_dict(self):
    """Returns a dictionary of the essential fields of this Genome."""
    return {
        'reference_name': self.reference_name,
        'annotation_name': self.annotation_name,
        'annotation_version': self.annotation_version,
        'gtf_path_or_url': self._gtf_path_or_url,
        'transcript_fasta_paths_or_urls': self._transcript_fasta_paths_or_urls,
        'protein_fasta_paths_or_urls': self._protein_fasta_paths_or_urls,
        'decompress_on_download': self.decompress_on_download,
        'copy_local_files_to_cache': self.copy_local_files_to_cache,
        'cache_directory_path': self.cache_directory_path,
    }
Returns a dictionary of the essential fields of this Genome.
def show_nmea():
    """NMEA output in curses terminal.

    Streams raw NMEA sentences from the module-level gpsd socket into a
    curses window.  'q' quits; 'j' switches the session to JSON protocol
    and hands off to the human-readable view.
    """
    data_window = curses.newwin(24, 79, 0, 0)
    for new_data in gpsd_socket:
        if new_data:
            screen.nodelay(1)  # make getch() non-blocking
            key_press = screen.getch()
            if key_press == ord('q'):
                shut_down()
            elif key_press == ord('j'):
                # Switch protocols: stop NMEA watch, start JSON, then
                # delegate to the human-readable display loop.
                gpsd_socket.watch(enable=False, gpsd_protocol='nmea')
                gpsd_socket.watch(gpsd_protocol='json')
                show_human()
            data_window.border(0)
            data_window.addstr(0, 2, 'AGPS3 Python {}.{}.{} GPSD Interface Showing NMEA protocol'.format(*sys.version_info), curses.A_BOLD)
            data_window.addstr(2, 2, '{}'.format(gpsd_socket.response))
            data_window.refresh()
        else:
            sleep(.1)  # nothing received; avoid busy-waiting
NMEA output in curses terminal
def count(self):
    """The number of items, pruned or otherwise, contained by this branch.

    Lazily cached in self._count on first access.
    """
    cached = getattr(self, '_count', None)
    if cached is None:
        cached = self._count = getattr(self.node, 'count', 0)
    return cached
The number of items, pruned or otherwise, contained by this branch.
def merge(self, ontologies):
    """Merges specified ontologies into the current ontology.

    Copies every node, edge (with data) and xref edge from each source
    graph into this ontology's graph and xref graph.
    """
    if self.xref_graph is None:
        self.xref_graph = nx.MultiGraph()
    logger.info("Merging source: {} xrefs: {}".format(self, len(self.xref_graph.edges())))
    for ont in ontologies:
        logger.info("Merging {} into {}".format(ont, self))
        g = self.get_graph()
        srcg = ont.get_graph()
        for n in srcg.nodes():
            # NOTE(review): srcg.node[n] is the networkx 1.x node-attribute
            # API; confirm the pinned networkx version supports it.
            g.add_node(n, **srcg.node[n])
        for (o,s,m) in srcg.edges(data=True):
            g.add_edge(o,s,**m)
        if ont.xref_graph is not None:
            for (o,s,m) in ont.xref_graph.edges(data=True):
                self.xref_graph.add_edge(o,s,**m)
Merges specified ontology into current ontology
def name(self):
    """The data type name.

    Defaults to the class name split at camel-case boundaries
    (e.g. 'FooBar' -> 'Foo Bar') unless an explicit _name is set.
    """
    if self._name is not None:
        return self._name
    return re.sub(r"(?<=\w)([A-Z])", r" \1", self.__class__.__name__)
The data type name.
def _attach_new(self, records, current, touch=True): changes = { 'attached': [], 'updated': [] } for id, attributes in records.items(): if id not in current: self.attach(id, attributes, touch) changes['attached'].append(id) elif len(attributes) > 0 and self.update_existing_pivot(id, attributes, touch): changes['updated'].append(id) return changes
Attach all of the IDs that aren't in the current dict.
def on_app_shutdown(self, app):
    """Stop the file watcher and tear down transfer pages on shutdown."""
    if self.filewatcher:
        self.filewatcher.stop()
    if not self.profile:
        return
    self.upload_page.on_destroy()
    self.download_page.on_destroy()
Dump profile content to disk
def _delete_entity(partition_key, row_key, if_match):
    """Constructs a delete entity request."""
    for label, value in (('if_match', if_match),
                         ('partition_key', partition_key),
                         ('row_key', row_key)):
        _validate_not_none(label, value)
    request = HTTPRequest()
    request.method = 'DELETE'
    request.headers = [_DEFAULT_ACCEPT_HEADER, ('If-Match', _to_str(if_match))]
    return request
Constructs a delete entity request.
def setup(args):
    """Returns the JobManager and sets up the basic logging infrastructure."""
    kwargs = {'wrapper_script': args.wrapper_script,
              'debug': args.verbose == 3,
              'database': args.database}
    if args.local:
        jm = local.JobManagerLocal(**kwargs)
    else:
        jm = sge.JobManagerSGE(**kwargs)

    if args.verbose not in range(0, 4):
        # BUG FIX: the original formatted this message with the undefined
        # name ``level``, raising NameError instead of the intended
        # ValueError.
        raise ValueError("The verbosity level %d does not exist. Please reduce the number of '--verbose' parameters in your call to maximum 3" % args.verbose)
    log_level = {0: logging.ERROR, 1: logging.WARNING,
                 2: logging.INFO, 3: logging.DEBUG}[args.verbose]
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(log_level)
    return jm
Returns the JobManager and sets up the basic infrastructure
def diff_archives(archive1, archive2, verbosity=0, interactive=True):
    """Print differences between two archives.

    Both paths must exist; nothing is logged when verbosity is negative.
    """
    for archive in (archive1, archive2):
        util.check_existing_filename(archive)
    if verbosity >= 0:
        util.log_info("Comparing %s with %s ..." % (archive1, archive2))
    res = _diff_archives(archive1, archive2,
                         verbosity=verbosity, interactive=interactive)
    if res == 0 and verbosity >= 0:
        util.log_info("... no differences found.")
Print differences between two archives.
def _isnan(self):
    """Return a boolean mask marking which values are NaN.

    Dtypes that cannot hold NA always yield an all-False mask.
    """
    if not self._can_hold_na:
        return np.zeros(len(self), dtype=np.bool_)
    return isna(self)
Return if each value is NaN.
def slack_package(prgnam):
    """Return maximum binary Slackware package from output directory.

    Exits with status 1 (after reporting a failed build) when no matching
    '_SBo' package is found.
    """
    candidates = [pkg for pkg in find_package(prgnam, _meta_.output)
                  if pkg.startswith(prgnam) and pkg[:-4].endswith("_SBo")]
    cache = "0"
    binary = ""
    for candidate in candidates:
        if LooseVersion(candidate) > LooseVersion(cache):
            binary = candidate
            cache = binary
    if not binary:
        Msg().build_FAILED(prgnam)
        raise SystemExit(1)
    return ["".join(_meta_.output + binary)]
Return maximum binary Slackware package from output directory
def from_etree(root):
    """Load up the citations, if present, from a VOEvent etree root.

    Returns a list of Cite entries; a single Citations-level Description
    (when present) is attached to every cite.
    """
    cite_list = []
    citations = root.xpath('Citations/EventIVORN')
    if citations:
        description = root.xpath('Citations/Description')
        if description:
            description_text = description[0].text
        else:
            description_text = None
        for entry in root.Citations.EventIVORN:
            if entry.text:
                cite_list.append(
                    Cite(ref_ivorn=entry.text,
                         cite_type=entry.attrib['cite'],
                         description=description_text)
                )
            else:
                # Empty EventIVORN elements carry no reference; skip them.
                logger.info(
                    'Ignoring empty citation in {}'.format(
                        root.attrib['ivorn']))
    return cite_list
Load up the citations, if present, for initializing with the Voevent.
def etcd(url=DEFAULT_URL, mock=False, **kwargs):
    """Creates an etcd client.

    With mock=True, backs the client with the in-memory mock adapter
    instead of a real etcd server.
    """
    if mock:
        from etc.adapters.mock import MockAdapter as adapter_class
    else:
        from etc.adapters.etcd import EtcdAdapter as adapter_class
    return Client(adapter_class(url, **kwargs))
Creates an etcd client.
def str_kind(self, kind):
    """Get a string describing a kind (a sequence of acceptable types)."""
    names = [t.__name__ for t in kind]
    if not names:
        return 'Nothing'
    if len(names) == 1:
        return names[0]
    if len(names) == 2:
        return '{} or {}'.format(names[0], names[1])
    return 'one of {' + ', '.join(names) + '}'
Get a string describing a kind.
def run_from_argv(self, argv):
    """Set the default Gherkin test runner for its options to be parsed.

    ``test_runner_class`` is a module-level default; assigning it before
    delegating lets the base command parse the runner's extra CLI options.
    """
    self.test_runner = test_runner_class
    super(Command, self).run_from_argv(argv)
Set the default Gherkin test runner for its options to be parsed.
def group_selection(request):
    """Allow user to select a TenantGroup if they have more than one.

    With exactly one group, skip the landing page and redirect straight
    to it.
    """
    groups = get_user_groups(request.user)
    if len(groups) == 1:
        return redirect(groups[0])
    context = {'groups': groups, 'count': len(groups)}
    return render(request, 'multitenancy/group-landing.html', context)
Allow user to select a TenantGroup if they have more than one.
def arg(*args, **kwargs):
    """Decorates a function or a class method to add an argument to the
    argument parser.

    Positional args/kwargs are stored on the function as add_argument()
    parameters; an explicit dest/name replaces any auto-collected argument
    of the same name.
    """
    def decorate(func):
        # Command name defaults to the function name unless overridden.
        func.__cmd_name__ = kwargs.pop(
            'cmd_name', getattr(func, '__cmd_name__', func.__name__))
        func.__cls__ = utils.check_class()
        if not hasattr(func, '__arguments__'):
            # First decoration: collect arguments from the signature.
            func.__arguments__ = utils.get_functarguments(func)
        if len(args) or len(kwargs):
            # Explicit argument: derive its destination name and replace
            # any previously auto-collected argument with the same name.
            arg_name = kwargs.get(
                'dest', args[-1].lstrip('-').replace('-', '_'))
            try:
                idx = func.__named__.index(arg_name)
                del func.__named__[idx]
                del func.__arguments__[idx]
            except ValueError:
                # Not previously known; nothing to remove.
                pass
            func.__arguments__.append((args, kwargs,))
        if func.__cls__ is None and isinstance(func, types.FunctionType):
            # Plain function command: register it globally.
            ap_ = ArgParseInator(skip_init=True)
            if func.__cmd_name__ not in ap_.commands:
                ap_.commands[func.__cmd_name__] = func
        return func
    return decorate
Decorates a function or a class method to add it to the argument parser.
def terminate_all(self):
    """Terminate all worker processes and clear the worker queue."""
    for proc in self._workers:
        proc.terminate()
    self._queue_workers = deque()
Terminate all worker processes.
def attach_intf_router(self, tenant_id, tenant_name, router_id):
    """Routine to attach the in/out subnet interfaces to the router."""
    in_sub = self.get_in_subnet_id(tenant_id)
    out_sub = self.get_out_subnet_id(tenant_id)
    subnets = {in_sub, out_sub}
    ret = self.os_helper.add_intf_router(router_id, tenant_id, subnets)
    return ret, in_sub, out_sub
Routine to attach the interface to the router.
def _sed_esc(string, escape_all=False): special_chars = "^.[$()|*+?{" string = string.replace("'", "'\"'\"'").replace("/", "\\/") if escape_all is True: for char in special_chars: string = string.replace(char, "\\" + char) return string
Escape single quotes and forward slashes
def git_wrapper(path):
    """Get appropriate wrapper factory and cache instance for path."""
    path = os.path.abspath(path)
    if path not in _wrapper_cache:
        legacy = hasattr(Repo, 'commits')
        factory = _GitWrapperLegacy if legacy else _GitWrapper
        _wrapper_cache[path] = factory(path)
    return _wrapper_cache[path]
Get appropriate wrapper factory and cache instance for path
def comment(self, message):
    """Add new comment to record comment field.

    Builds the Swimlane comment payload (author, RFC3339 timestamp,
    message), appends it to the local element list and to the record's raw
    comment data, then returns the Comment wrapper.
    """
    message = str(message)
    sw_repr = {
        '$type': 'Core.Models.Record.Comments, Core',
        'createdByUser': self._record._swimlane.user.as_usergroup_selection(),
        'createdDate': pendulum.now().to_rfc3339_string(),
        'message': message
    }
    comment = Comment(self._swimlane, sw_repr)
    self._elements.append(comment)
    # Ensure the raw comment bucket for this field exists, then mirror the
    # new comment into the record's raw payload.
    self._record._raw['comments'].setdefault(self._field.id, [])
    self._record._raw['comments'][self._field.id].append(comment._raw)
    return comment
Add new comment to record comment field
def in_feed(self, feed):
    """Return the objects in the target feed."""
    return self.filter(**{self.model._rel_to_feed: feed})
Return the objects in the target feed
def xyzlabel(labelx, labely, labelz):
    """Set the x, y and z axis labels at once."""
    for setter, text in ((xlabel, labelx), (ylabel, labely), (zlabel, labelz)):
        setter(text)
Set all labels at once.
def to_bytes(val):
    """Encode a text message to bytes, expanding escaped CR/LF sequences.

    The NoResponse sentinel is passed through unchanged.
    """
    if val is NoResponse:
        return val
    unescaped = val.replace('\\r', '\r').replace('\\n', '\n')
    return unescaped.encode()
Takes a text message and return a tuple
def add_child(self, child):
    """Adds self as parent to child, appends child, and returns it."""
    self.children.append(child)
    child.parent = self
    return child
Adds self as parent to child, and then adds child.
def disconnect(self, client):
    """Remove client from pool and close its connection."""
    self.clients.remove(client)
    self.connect_args.pop(client)
    client.disconnect()
Remove client from pool.
def user_pk_to_url_str(user):
    """Serialize a user primary key to a URL-safe string.

    UUID pks use hex, integer pks use base36, anything else str().
    """
    User = get_user_model()
    if issubclass(type(User._meta.pk), models.UUIDField):
        if isinstance(user.pk, six.string_types):
            return user.pk
        return user.pk.hex
    value = user.pk
    if isinstance(value, six.integer_types):
        value = int_to_base36(user.pk)
    return str(value)
This should return a string.
def filesize(value):
    """Display a human readable filesize (French 'o' octet suffix)."""
    suffix = 'o'
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    return "%.1f%s%s" % (value, 'Y', suffix)
Display a human readable filesize
def loadIntoTextureD3D11_Async(self, textureId, pDstTexture):
    """Helper function to copy the bits into an existing D3D11 texture."""
    fn = self.function_table.loadIntoTextureD3D11_Async
    return fn(textureId, pDstTexture)
Helper function to copy the bits into an existing texture.
def _add_line_segment(self, x, y):
    """Add a |_LineSegment| operation to the drawing sequence."""
    segment = _LineSegment.new(self, x, y)
    self._drawing_operations.append(segment)
Add a |_LineSegment| operation to the drawing sequence.
def _parse_args():
    """Parse the command line for setuptools bootstrap options."""
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    parser.add_option(
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader')
    options, args = parser.parse_args()
    # Positional arguments are ignored; only options are meaningful here.
    return options
Parse the command line for options
def msgblock(key, text, side='|'):
    """Put text inside a visual ascii block."""
    parts = [' + --- ', key, ' ---\n']
    for line in text.split('\n'):
        parts.append(' ' + side + ' ' + line + '\n')
    parts += [' L ___ ', key, ' ___\n']
    return ''.join(parts)
puts text inside a visual ascii block
def delete(self):
    """Delete the write buffer and cache directory."""
    if not self._sync:
        # Synchronous mode never allocated a buffer.
        del self._buffer
    shutil.rmtree(self.cache_dir)
Delete the write buffer and cache directory.
def _remove_wire_nets(block):
    """Remove all 'w' (wire/identity) nets from the block.

    Each wire net simply forwards its argument to its destination, so every
    use of a wire's destination can be replaced by the wire's source; the
    now-dead intermediate WireVectors are then deleted from the block.
    """
    # Map each wire destination to the value that drives it.
    wire_src_dict = _ProducerList()
    wire_removal_set = set()
    for net in block.logic:
        if net.op == 'w':
            wire_src_dict[net.dests[0]] = net.args[0]
            # Outputs must survive even when driven through a wire net.
            if not isinstance(net.dests[0], Output):
                wire_removal_set.add(net.dests[0])
    # Rebuild the logic with wire destinations replaced by their producers.
    new_logic = set()
    for net in block.logic:
        if net.op != 'w' or isinstance(net.dests[0], Output):
            new_args = tuple(wire_src_dict.find_producer(x) for x in net.args)
            new_net = LogicNet(net.op, net.op_param, new_args, net.dests)
            new_logic.add(new_net)
    block.logic = new_logic
    # Drop the dead intermediate wirevectors from the block's namespaces.
    for dead_wirevector in wire_removal_set:
        del block.wirevector_by_name[dead_wirevector.name]
        block.wirevector_set.remove(dead_wirevector)
    block.sanity_check()
Remove all wire nodes from the block.
def asDict(self):
    """Returns a dictionary for serialization, adding our extensions."""
    result = super(RtiRegItem, self).asDict()
    result['extensions'] = self.extensions
    return result
Returns a dictionary for serialization.
def using_git(cwd):
    """Test whether the directory cwd is contained in a git repository."""
    try:
        shell_out(["git", "log"], cwd=cwd)
    except (CalledProcessError, OSError):
        return False
    return True
Test whether the directory cwd is contained in a git repository.
def _check_exists(database: Database, table: LdapObjectClass, key: str, value: str):
    """Check if a given LDAP object exists."""
    try:
        get_one(table, Q(**{key: value}), database=database)
    except ObjectDoesNotExist:
        return False
    return True
Check if a given LDAP object exists.
def rex(expr):
    """Regular expression matcher to use together with transform functions."""
    compiled = re.compile(expr)

    def matches(key):
        return isinstance(key, six.string_types) and compiled.match(key)

    return matches
Regular expression matcher to use together with transform functions
def close(self):
    """Closes the connection to the bridge and wakes the command worker."""
    self.is_closed = True
    self.is_ready = False
    # Sentinel None tells the queue consumer to stop.
    self._command_queue.put(None)
Closes the connection to the bridge.
def conversations(self):
    """Get the conversation topics for the configured targeting criteria.

    POSTs the conversation type, audience definition and targeting inputs
    to the account endpoint via the private __get helper.
    """
    body = {
        "conversation_type": self.conversation_type,
        "audience_definition": self.audience_definition,
        "targeting_inputs": self.targeting_inputs
    }
    # NOTE(review): the double-underscore attribute relies on name mangling
    # within the original enclosing class.
    return self.__get(account=self.account,
                      client=self.account.client,
                      params=json.dumps(body))
Get the conversation topics for an input targeting criteria
def parse_hostname(cls, hostname):
    """Parse the hostname and normalize.

    Bracketed names are treated as IPv6; others are normalized as IPv4 when
    possible, then validated against forbidden characters.
    """
    if hostname.startswith('['):
        return cls.parse_ipv6_hostname(hostname)
    try:
        candidate = normalize_ipv4_address(hostname)
    except ValueError:
        candidate = hostname
    candidate = normalize_hostname(candidate)
    for char in FORBIDDEN_HOSTNAME_CHARS:
        if char in candidate:
            raise ValueError('Invalid hostname: {}'.format(ascii(hostname)))
    return candidate
Parse the hostname and normalize.
def copy_files_to_folder(src, dest, xtn='*.txt'):
    """Copies all files matching *xtn* from src to the dest folder.

    Errors are reported to stdout rather than raised (best-effort copy).
    """
    try:
        for path in glob.glob(os.path.join(src, xtn)):
            copy_file(path, dest)
    except Exception as ex:
        print('ERROR copy_files_to_folder - ' + str(ex))
copies all the files from src to dest folder
def draw(graph, fname):
    """Draw a graph with graphviz 'dot' and save it into a file."""
    agraph = networkx.nx_agraph.to_agraph(graph)
    agraph.draw(fname, prog='dot')
Draw a graph and save it into a file
def change_parent_of_project(self, ID, NewParrentID):
    """Change parent of project."""
    log.info('Change parrent for project %s to %s' % (ID, NewParrentID))
    payload = {'parent_id': NewParrentID}
    self.put('projects/%s/change_parent.json' % ID, payload)
Change parent of project.
def mse(X):
    """Mean spectral energy for each variable in a segmented time series.

    Computes, per row, the mean squared magnitude of the FFT taken along
    axis 1.
    """
    spectrum = np.fft.fft(X, axis=1)
    return np.mean(np.abs(spectrum) ** 2, axis=1)
computes mean spectral energy for each variable in a segmented time series