code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def bar_(self, label=None, style=None, opts=None, options=None):
    """Draw a bar chart.

    :param label: chart label
    :param style: chart style
    :param opts: chart options
    :param options: extra chart options; a fresh dict is created per call
    :returns: the chart object from _get_chart, or None on error
    """
    # Fix: a mutable default argument ({}) is shared across calls and can
    # leak state between charts; use the None-sentinel idiom instead.
    if options is None:
        options = {}
    try:
        return self._get_chart("bar", style=style, opts=opts,
                               label=label, options=options)
    except Exception as e:
        self.err(e, self.bar_, "Can not draw bar chart")
Get a bar chart
def getmodule(object): if ismodule(object): return object if isclass(object): return sys.modules.get(object.__module__) try: file = getabsfile(object) except TypeError: return None if modulesbyfile.has_key(file): return sys.modules[modulesbyfile[file]] for...
Return the module an object was defined in, or None if not found.
def _readhex(self, length, start): if length % 4: raise InterpretError("Cannot convert to hex unambiguously - " "not multiple of 4 bits.") if not length: return '' s = self._slice(start, start + length).tobytes() try: ...
Read bits and interpret as a hex string.
def getRelativePath(basepath, path): basepath = splitpath(os.path.abspath(basepath)) path = splitpath(os.path.abspath(path)) afterCommon = False for c in basepath: if afterCommon or path[0] != c: path.insert(0, os.path.pardir) afterCommon = True else: ...
Get a path that is relative to the given base path.
def cli(ctx, config, quiet):
    """AWS ECS Docker deployment tool entry point."""
    context = {
        'config': load_config(config.read()),
        'quiet': quiet,
    }
    ctx.obj = context
    log(ctx, ' * ' + rnd_scotty_quote() + ' * ')
AWS ECS Docker Deployment Tool
def _ReloadArtifacts(self):
    """Load artifacts from all sources.

    Clears the in-memory artifact registry, repopulates it from every
    file reported by the configured sources, then reloads datastore
    artifacts on top.
    """
    self._artifacts = {}
    self._LoadArtifactsFromFiles(self._sources.GetAllFiles())
    self.ReloadDatastoreArtifacts()
Load artifacts from all sources.
def unregister_transform(self, node_class, transform, predicate=None):
    """Unregister the given transform.

    Removes the (transform, predicate) pair registered for
    *node_class*; raises ValueError if it was never registered.
    """
    entry = (transform, predicate)
    self.transforms[node_class].remove(entry)
Unregister the given transform.
def symbol(currency, *, native=True):
    """Return the symbol of *currency* (native form by default)."""
    code = validate_currency(currency)
    entry = _currencies[code]
    key = 'symbol_native' if native else 'symbol'
    return entry[key]
return symbol of currency
def nice_true_ces(tc): cause_list = [] next_list = [] cause = '<--' effect = '-->' for event in tc: if event.direction == Direction.CAUSE: cause_list.append(["{0:.4f}".format(round(event.alpha, 4)), event.mechanism, cause, event.purview]) el...
Format a true |CauseEffectStructure|.
def create_and_run_collector(document, options):
    """Create and run a collector process for report data.

    :param document: document the collector store is configured with
    :param options: options object; reporting is skipped when
        options.report == 'off'
    :returns: the started Collector, or None when reporting is disabled
    """
    # Guard clause replaces the `if not x == 'off'` anti-idiom and the
    # collector = None pre-assignment.
    if options.report == 'off':
        return None
    collector = Collector()
    collector.store.configure(document)
    Event.configure(collector_queue=collector.queue)
    collector.start()
    return collector
Create and run collector process for report data.
def _resolve_time(value): if value is None or isinstance(value,(int,long)): return value if NUMBER_TIME.match(value): return long(value) simple = SIMPLE_TIME.match(value) if SIMPLE_TIME.match(value): multiplier = long( simple.groups()[0] ) constant = SIMPLE_TIMES[ simple.groups()[1] ] return...
Resolve the time in seconds of a configuration value.
def fix_positions(self): shift_x = 0 for m in self.__reactants: max_x = self.__fix_positions(m, shift_x, 0) shift_x = max_x + 1 arrow_min = shift_x if self.__reagents: for m in self.__reagents: max_x = self.__fix_positions(m, shift_x, 1...
fix coordinates of molecules in reaction
def begin(self):
    """Initialize driver with LEDs enabled and all turned off.

    Turns on the HT16K33 oscillator (empty data payload), disables
    blinking, and sets brightness to the maximum level (15).
    """
    self._device.writeList(HT16K33_SYSTEM_SETUP | HT16K33_OSCILLATOR, [])
    self.set_blink(HT16K33_BLINK_OFF)
    self.set_brightness(15)
Initialize driver with LEDs enabled and all turned off.
def save(self):
    """Format and save cells to ``self.filename``.

    Renumbers the cells, ensures the final cell ends with a newline,
    and writes them separated by blank lines.
    """
    self.cells = list(self.renumber())
    # Fix: an empty cell list used to raise IndexError on cells[-1];
    # now an empty file is written instead.
    if self.cells and not self.cells[-1].endswith('\n'):
        self.cells[-1] += '\n'
    with open(self.filename, 'w') as out:
        out.write('\n\n'.join(self.cells))
Format and save cells.
def dropIndex(cls, fields) :
    "removes an index created with ensureIndex "
    # Resolve the per-field index names via _parseIndex, drop each one
    # on 'anchor_raba_id', then drop the combined-field index on this
    # class and commit the change.
    con = RabaConnection(cls._raba_namespace)
    rlf, ff = cls._parseIndex(fields)
    for name in rlf :
        con.dropIndex(name, 'anchor_raba_id')
    con.dropIndex(cls.__name__, ff)
    con.commit()
removes an index created with ensureIndex
def head(self, item):
    """Issue a HEAD request for a single item and return the result."""
    item_id = utils.get_id(item)
    uri = "/{0}/{1}".format(self.uri_base, item_id)
    return self._head(uri)
Makes a HEAD request on a specific item.
def biopython_protein_scale(inseq, scale, custom_scale_dict=None, window=7): if scale == 'kd_hydrophobicity': scale_dict = kd_hydrophobicity_one elif scale == 'bulkiness': scale_dict = bulkiness_one elif scale == 'custom': scale_dict = custom_scale_dict else: raise ValueE...
Use Biopython to calculate properties using a sliding window over a sequence given a specific scale to use.
def _make_txn(signer, setting_key, payload): serialized_payload = payload.SerializeToString() header = TransactionHeader( signer_public_key=signer.get_public_key().as_hex(), family_name='sawtooth_settings', family_version='1.0', inputs=_config_inputs(setting_key), outputs...
Creates and signs a sawtooth_settings transaction with a payload.
def _traverse_command(self, name, *args, **kwargs): if not name in self.available_commands: raise AttributeError("%s is not an available command for %s" % (name, self.__class__.__name__)) attr = getattr(self.connection, "%s" % name) key = self.key ...
Add the key to the args and call the Redis command.
def ensure_stacker_compat_config(config_filename): try: with open(config_filename, 'r') as stream: yaml.safe_load(stream) except yaml.constructor.ConstructorError as yaml_error: if yaml_error.problem.startswith( 'could not determine a constructor for the tag \'!'): ...
Ensure config file can be loaded by Stacker.
def checksum_contracts(self) -> None: checksums: Dict[str, str] = {} for contracts_dir in self.contracts_source_dirs.values(): file: Path for file in contracts_dir.glob('*.sol'): checksums[file.name] = hashlib.sha256(file.read_bytes()).hexdigest() self.ove...
Remember the checksum of each source, and the overall checksum.
def _create_plain_field(self, attr, options): method = self._get_field_method(attr.py_type) or self._create_other_field klass, options = method(attr, options) if attr.is_unique: options['validators'].append(validators.UniqueEntityValidator(attr.entity)) return klass, options
Creates the form element.
def update(self, **kwargs):
    """Set each keyword argument as an attribute on the instance."""
    for name in kwargs:
        setattr(self, name, kwargs[name])
Creates or updates a property for the instance for each parameter.
def install_supervisor(self, update=False): script = supervisor.Recipe( self.buildout, self.name, {'user': self.options.get('user'), 'program': self.options.get('program'), 'command': templ_cmd.render(config=self.conf_filename, prefix=self.prefix), ...
install supervisor config for redis
def _get_results_from_api(identifiers, endpoints, api_key, api_secret): if api_key is not None and api_secret is not None: client = housecanary.ApiClient(api_key, api_secret) else: client = housecanary.ApiClient() wrapper = getattr(client, endpoints[0].split('/')[0]) if len(endpoints) > ...
Use the HouseCanary API Python Client to access the API
def _dmi_cast(key, val, clean=True): if clean and not _dmi_isclean(key, val): return elif not re.match(r'serial|part|asset|product', key, flags=re.IGNORECASE): if ',' in val: val = [el.strip() for el in val.split(',')] else: try: val = int(val) ...
Simple caster thingy for trying to fish out at least ints & lists from strings
def extract_pls_rsp(rsp_str): try: rsp = json.loads(rsp_str) except ValueError: traceback.print_exc() err = sys.exc_info()[1] err_str = ERROR_STR_PREFIX + str(err) return RET_ERROR, err_str, None error_code = int(rsp['retType']) if error_code != 1: error_s...
Extract the response of PLS
def export_to_dom(self): namespaces = 'xmlns="http://www.neuroml.org/lems/%s" ' + \ 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' + \ 'xsi:schemaLocation="http://www.neuroml.org/lems/%s %s"' namespaces = namespaces%(self.target_lems_version,self.targe...
Exports this model to a DOM.
def execute(self):
    """Parse the command line and run the selected command.

    Falls back to the help screen when no command was given.
    """
    self.autocomplete()
    if not self.argv:
        self.show_help()
        return
    command = self.argv[0]
    self.run_command(command, self.get_argv_for_command())
Executes whole process of parsing and running command.
def _handleSmsStatusReport(self, notificationLine): self.log.debug('SMS status report received') cdsiMatch = self.CDSI_REGEX.match(notificationLine) if cdsiMatch: msgMemory = cdsiMatch.group(1) msgIndex = cdsiMatch.group(2) report = self.readStoredSms(msgIndex...
Handler for SMS status reports
def flush(self): self._check_open_file() if self.allow_update and not self.is_stream: contents = self._io.getvalue() if self._append: self._sync_io() old_contents = (self.file_object.byte_contents if is_byte_string(c...
Flush file contents to 'disk'.
def dictionary_validator(key_type, value_type): def _validate_dictionary(instance, attribute, value): if not isinstance(value, dict): raise TypeError('"{}" must be a dictionary'.format(attribute.name)) for key, data in value.items(): if not isinstance(key, key_type): ...
Validator for ``attrs`` that performs deep type checking of dictionaries.
def _dispatch_event(self, event: LutronEvent, params: Dict):
    """Invoke every subscribed (handler, context) pair with this event."""
    for callback, context in self._subscribers:
        callback(self, context, event, params)
Dispatches the specified event to all the subscribers.
def _py_expand_short(subsequence, sequence, max_l_dist): subseq_len = len(subsequence) if subseq_len == 0: return (0, 0) scores = list(range(1, subseq_len + 1)) min_score = subseq_len min_score_idx = -1 for seq_index, char in enumerate(sequence): a = seq_index c = a + 1 ...
Straightforward implementation of partial match expansion.
def _to_dict(self):
    """Map each physical-property name on this object to its value."""
    names = find_PhysicalProperty(self)
    return {name: getattr(self, name) for name in names}
Return a dictionary representation of the current object.
def diffuse_template(self, **kwargs): kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) self._replace_none(kwargs_copy) localpath = NameFactory.diffuse_template_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(...
return the file name for other diffuse map templates
def theme_static_with_version(ctx, filename, external=False): if current_app.theme_manager.static_folder: url = assets.cdn_for('_themes.static', filename=current.identifier + '/' + filename, _external=external) else: url = assets.cdn_for(...
Override the default theme static to add cache burst
def strip(notebook):
    """Clear outputs and execution counts from every code cell."""
    code_cells = (c for c in notebook.cells if c.cell_type == 'code')
    for cell in code_cells:
        cell.outputs = []
        cell.execution_count = None
Remove outputs from a notebook.
def _is_mobile(ntype):
    """Checks if the supplied number type supports carrier lookup."""
    mobile_types = (PhoneNumberType.MOBILE,
                    PhoneNumberType.FIXED_LINE_OR_MOBILE,
                    PhoneNumberType.PAGER)
    return ntype in mobile_types
Checks if the supplied number type supports carrier lookup
def install(): tmp_weboob_dir = '/tmp/weboob' while (os.path.exists(tmp_weboob_dir)): tmp_weboob_dir += '1' print 'Fetching sources in temporary dir {}'.format(tmp_weboob_dir) result = cmd_exec('git clone {} {}'.format(WEBOOB_REPO, tmp_weboob_dir)) if (result['error']): print result[...
Install weboob system-wide
def create(self, id):
    """Create a new tenant with the given id and display the result."""
    response = self.client.accounts.create(id=id)
    self.display(response)
Create a new tenant id
def fromTFExample(bytestr):
    """Deserialize a TFExample protobuf message from a byte string."""
    parsed = tf.train.Example()
    parsed.ParseFromString(bytestr)
    return parsed
Deserializes a TFExample from a byte string
def schema_map(schema):
    """Return a valid ICachedItemMapper.map for *schema*.

    The mapping is the identity: each field name maps to itself.
    """
    # Dict comprehension replaces the manual loop-and-assign.
    return {name: name for name in getFieldNames(schema)}
Return a valid ICachedItemMapper.map for schema
def _write_to_graph(self): traces = [] for byte_code, trace_data in self.coverage.items(): traces += [list(trace_data.keys()), list(trace_data.values()), "r--"] plt.plot(*traces) plt.axis([0, self.end - self.begin, 0, 100]) plt.xlabel("Duration (seconds)") plt...
Write the coverage results to a graph
def _clean_result(self, text): text = re.sub('\s\s+', ' ', text) text = re.sub('\.\.+', '.', text) text = text.replace("'", "\\'") return text
Remove double spaces, punctuation and escapes apostrophes.
def _question_line(self, section): token = self.tok.get(want_leading = True) if not token.is_whitespace(): self.last_name = dns.name.from_text(token.value, None) name = self.last_name token = self.tok.get() if not token.is_identifier(): raise dns.exception...
Process one line from the text format question section.
def _decode_temp(byte_1, byte_2): temp = (byte_1 << 8) + byte_2 if (temp > 32767): temp = temp - 65536 temp = temp / 10 return temp
Decode a signed short temperature as two bytes to a single number.
def to_vararray(var_instance, bounds): assert isinstance(var_instance, SymbolVAR) from symbols import BOUNDLIST from symbols import VARARRAY assert isinstance(bounds, BOUNDLIST) var_instance.__class__ = VARARRAY var_instance.class_ = CLASS.array var_instance.bound...
Converts a var_instance to a var array one
def app_profile_path(cls, project, instance, app_profile):
    """Return a fully-qualified app_profile string.

    Expands to
    ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``.
    """
    return google.api_core.path_template.expand(
        "projects/{project}/instances/{instance}/appProfiles/{app_profile}",
        project=project,
        instance=instance,
        app_profile=app_profile,
    )
Return a fully-qualified app_profile string.
def reloadGraphs(self) : "reloads the graph list" r = self.connection.session.get(self.graphsURL) data = r.json() if r.status_code == 200 : self.graphs = {} for graphData in data["graphs"] : try : self.graphs[graphData["_key"]] ...
reloads the graph list
def find_api_id(self): allapis = self.client.get_rest_apis() api_name = self.trigger_settings['api_name'] api_id = None for api in allapis['items']: if api['name'] == api_name: api_id = api['id'] self.log.info("Found API for: %s", api_name) ...
Given API name, find API ID.
def catCSVs(folder, ouputFileName, removeDups = False) :
    # Concatenate every CSV in *folder* into *ouputFileName* via a shell
    # command, optionally de-duplicating the result afterwards.
    # NOTE(review): the raw-string command literal appears garbled in this
    # extract ("r %(...)") — presumably a `cat`-style shell template over
    # folder/*.csv; confirm against the original source. Unix-only.
    strCmd = r %(folder, ouputFileName)
    os.system(strCmd)
    if removeDups :
        removeDuplicates(ouputFileName, ouputFileName)
Concatenates all CSVs in 'folder' and writes the results to 'ouputFileName'. May not work on non-Unix systems.
def colordict(self):
    """Dictionary holding colors that correspond to constraints.

    Each constraint is assigned an evenly spaced color from the jet
    colormap, in iteration order.
    """
    n = len(self.constraints)
    # enumerate + comprehension replaces the manual index counter;
    # `1.*i/n` kept so the division is float even under Python 2.
    return {c: cm.jet(1. * i / n) for i, c in enumerate(self.constraints)}
Dictionary holding colors that correspond to constraints.
def energy(self):
    """Calculates the length of the route.

    The tour is closed: state[i-1] at i == 0 wraps to the last city,
    so the distance back to the start is included.
    """
    route = self.state
    # sum() over a generator replaces the manual accumulator loop.
    return sum(self.distance_matrix[route[i - 1]][route[i]]
               for i in range(len(route)))
Calculates the length of the route.
def base_url(klass, space_id, resource_id=None, public=False, environment_id=None, **kwargs): if public: environment_slug = "" if environment_id is not None: environment_slug = "/environments/{0}".format(environment_id) return "spaces/{0}{1}/public/content_typ...
Returns the URI for the content type.
def _get_app_config(self, app_name):
    """Return the app config matching *app_name* by name (not label).

    :returns: the first matching app config, or None when none match
    """
    # next() with a default stops at the first hit instead of building
    # the full list just to take element [0].
    return next((app_config for app_config in apps.get_app_configs()
                 if app_config.name == app_name), None)
Returns an app config for the given name, not by label.
def urlencode(params): if not isinstance(params, dict): raise TypeError("Only dicts are supported.") params = flatten(params) url_params = OrderedDict() for param in params: value = param.pop() name = parametrize(param) if isinstance(value, (list, tuple)): nam...
Urlencode a multidimensional dict.
def _get_s3_files(local_dir, file_info, params): assert len(file_info) == 1 files = file_info.values()[0] fnames = [] for k in ["1", "2"]: if files[k] not in fnames: fnames.append(files[k]) out = [] for fname in fnames: bucket, key = fname.replace("s3://", "").split("...
Retrieve s3 files to local directory, handling STORMSeq inputs.
def sqlvm_group_create(client, cmd, sql_virtual_machine_group_name, resource_group_name, location, sql_image_offer, sql_image_sku, domain_fqdn, cluster_operator_account, sql_service_account, storage_account_url, storage_account_key, cluster_bootstrap_account=None, ...
Creates a SQL virtual machine group.
def home(request):
    """Try to connect to the database; 500 on failure, context on success."""
    try:
        DBSession.query(User).first()
    except DBAPIError:
        return Response(conn_err_msg,
                        content_type="text/plain",
                        status_int=500)
    return {"project": "pyramid_tut"}
Try to connect to database, and list available examples.
def _read_depth_images(self, num_images): depth_images = self._ros_read_images(self._depth_image_buffer, num_images, self.staleness_limit) for i in range(0, num_images): depth_images[i] = depth_images[i] * MM_TO_METERS if self._flip_images: depth_images[i] = np.fl...
Reads depth images from the device
def load_yaml(fname):
    """Load a YAML file, allowing duplicate keys; {} for empty files.

    NOTE(review): HassSafeConstructor.name is set as class-level global
    state — presumably consumed by the constructor for error context or
    custom tags; not thread-safe. Confirm before concurrent use.
    """
    yaml = YAML(typ="safe")
    yaml.allow_duplicate_keys = True
    HassSafeConstructor.name = fname
    yaml.Constructor = HassSafeConstructor
    with open(fname, encoding="utf-8") as conf_file:
        return yaml.load(conf_file) or {}
Load a YAML file.
def cli(sequencepath, report, refseq_database):
    """Run the feature-extraction pipeline with the CLI arguments."""
    thread_count = multiprocessing.cpu_count()
    main(sequencepath, report, refseq_database, num_threads=thread_count)
Pass command line arguments to, and run the feature extraction functions
def dynare_import(filename,full_output=False, debug=False): import os basename = os.path.basename(filename) fname = re.compile('(.*)\.(.*)').match(basename).group(1) f = open(filename) txt = f.read() model = parse_dynare_text(txt,full_output=full_output, debug=debug) model.name = fname r...
Imports model defined in specified file
def start(self, ccallbacks=None):
    """Establish and maintain connections.

    Spawns the connection-manager greenlet, then blocks until the
    ready event is signalled.

    :param ccallbacks: optional connection callbacks passed through to
        the manager loop.
    """
    self.__manage_g = gevent.spawn(self.__manage_connections, ccallbacks)
    self.__ready_ev.wait()
Establish and maintain connections.
def busy_display():
    """Display a spinner animation on stdout to show activity (~3 min).

    Hides the cursor, then redraws one of four spinner glyphs in place
    every 0.1 s for 1800 ticks.
    """
    sys.stdout.write("\033[?25l")  # hide the cursor
    sys.stdout.flush()
    # Hoisted: the glyph list was rebuilt on every iteration.
    symbols = ['\\', '|', '/', '-']
    for tick in range(1800):
        sys.stdout.write("\033[D{}".format(symbols[tick % 4]))
        sys.stdout.flush()
        gevent.sleep(0.1)
Display animation to show activity.
def _try_import(module_name): try: mod = importlib.import_module(module_name) return mod except ImportError: err_msg = ("Tried importing %s but failed. See setup.py extras_require. " "The dataset you are trying to use may have additional " "dependencies.") utils.reraise...
Try importing a module, with an informative error message on failure.
def treeplot(self, qlist, credible_interval):
    """Yield (y, label, ntiles, color) data for each treeplot variable."""
    for y, _, label, values, color in self.iterator():
        flat = values.flatten()
        ntiles = np.percentile(flat, qlist)
        # Replace the outermost quantiles with the HPD interval bounds.
        ntiles[0], ntiles[-1] = hpd(flat, credible_interval)
        yield y, label, ntiles, color
Get data for each treeplot for the variable.
def worker_loop_v1(dataset, key_queue, data_queue, batchify_fn):
    """Worker loop for the multiprocessing DataLoader.

    Pulls (idx, sample-indices) jobs from key_queue until a None idx
    sentinel arrives; pushes (idx, batch) results onto data_queue.
    """
    while True:
        idx, samples = key_queue.get()
        if idx is None:
            return
        items = [dataset[i] for i in samples]
        data_queue.put((idx, batchify_fn(items)))
Worker loop for multiprocessing DataLoader.
def restore(self): if self.proxy_object is None: if self.getter: setattr(self.getter_class, self.attr_name, self.getter) elif self.is_local: setattr(self.orig_object, self.attr_name, self.orig_value) else: delattr(self.orig_obje...
Restore the saved value for the attribute of the object.
def _array2cstr(arr): out = StringIO() np.save(out, arr) return b64encode(out.getvalue())
Serializes a numpy array to a compressed base64 string
def search_fetch_force(request, id, redirect_to):
    """Forcibly fetch tweets for the search, notify, then redirect."""
    search = Search.objects.get(id=id)
    search.fetch(force=True)
    messages.success(request,
                     _("Fetched tweets for %s" % search.criteria),
                     fail_silently=True)
    return HttpResponseRedirect(redirect_to)
Forcibly fetch tweets for the search
def listDatasetAccessTypes(self, dataset_access_type=""): if isinstance(dataset_access_type, basestring): try: dataset_access_type = str(dataset_access_type) except: dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not va...
List dataset access types
def json_wrap(function, *args, **kwargs):
    """Return the parsed JSON content of a request-returning function.

    Returns response['data'] when present (True if that value is
    falsy), otherwise the whole parsed response.

    :raises ClientException: on any failure, chained to the original
        error so the traceback is preserved.
    """
    try:
        response = json.loads(function(*args, **kwargs).content)
        if 'data' in response:
            return response['data'] or True
        return response
    except Exception as exc:
        # Fix: chain with `from exc` so the original traceback survives.
        raise ClientException(exc) from exc
Return the json content of a function that returns a request
def max(cls):
    """Return the largest stored record id, or 0 for an empty table."""
    latest = db.session.query(func.max(cls.recid)).scalar()
    return latest or 0
Get max record identifier.
def default_user_agent(name="python-requests"): _implementation = platform.python_implementation() if _implementation == 'CPython': _implementation_version = platform.python_version() elif _implementation == 'PyPy': _implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, ...
Return a string representing the default user agent.
def identity(ctx, variant_id): if not variant_id: LOG.warning("Please provide a variant id") ctx.abort() adapter = ctx.obj['adapter'] version = ctx.obj['version'] LOG.info("Search variants {0}".format(adapter)) result = adapter.get_clusters(variant_id) if result.count() == 0: ...
Check how well SVs are working in the database
def weighted_tanimoto(a, b, weights):
    """Tanimoto coefficient with per-dimension weights.

    Each component of a vector is multiplied by the matching weight
    before the coefficient is computed.
    """
    # Fix: tuple-unpacking lambdas (`lambda (x, y): ...`) are Python-2-only
    # syntax and a SyntaxError under Python 3; unpack in a comprehension.
    def weighted(vec):
        return [float(x) * float(w) for x, w in zip(vec, weights)]
    return tanimoto_coefficient(weighted(a), weighted(b))
Same as the Tanimoto coefficient, but with weights for each dimension.
def ssh_config(ssh_user, ssh_private_key_file): try: ssh_file = NamedTemporaryFile(delete=False, mode='w+') ssh_file.write('Host *\n') ssh_file.write(' IdentityFile %s\n' % ssh_private_key_file) ssh_file.write(' User %s' % ssh_user) ssh_file.close() yield ssh_fi...
Create temporary ssh config file.
def c_str(string):
    """Convert a Python string (or ASCII bytes) to a C char pointer."""
    if isinstance(string, str):
        text = string
    else:
        text = string.decode('ascii')  # bytes input: assume ASCII
    return ctypes.c_char_p(text.encode('utf-8'))
Convert a python string to C string.
def create_database_session(engine):
    """Open a database session bound to *engine*.

    Raises DatabaseError carrying the driver's error code and message
    when the connection cannot be established.
    """
    try:
        session_factory = sessionmaker(bind=engine)
        return session_factory()
    except OperationalError as e:
        code, message = e.orig.args[0], e.orig.args[1]
        raise DatabaseError(error=message, code=code)
Connect to the database
def __load_countries(self, db): try: countries = self.__read_countries_file() except IOError as e: raise LoadError(str(e)) try: with db.connect() as session: for country in countries: session.add(country) except Exce...
Load the list of countries
def select_locale_by_request(self, request, locales=()): default_locale = locales and locales[0] or self.cfg.default_locale if len(locales) == 1 or 'ACCEPT-LANGUAGE' not in request.headers: return default_locale ulocales = [ (q, locale_delim_re.split(v)[0]) fo...
Choose an user's locales by request.
def _load_rules(forcefield): rules = dict() for rule_name, smarts in forcefield.atomTypeDefinitions.items(): overrides = forcefield.atomTypeOverrides.get(rule_name) if overrides is not None: overrides = set(overrides) else: overrides = set() rules[rule_nam...
Load atomtyping rules from a forcefield into SMARTSGraphs.
def load_data(self): try: df = self.live_quote_arg_func(self.tickers) for index, ticker in enumerate(self.tickers): ticker_info = df.loc[index] self.ticker_dict[ticker].append(ticker_info['price'], ticker_inf...
Overwrite this for new source data structures
def resource(self):
    """Resource associated with this operation: own first, else binding's."""
    if self._resource:
        return self._resource
    if self.binding:
        return self.binding.resource
Resource associated with operation.
def append_rally_point(self, p):
    """Add a rally point to the end of the list (capped at 10 points)."""
    if self.rally_count() > 9:
        print("Can't have more than 10 rally points, not adding.")
        return
    self.rally_points.append(p)
    self.reindex()
add rallypoint to end of list
def to_dict(self):
    """Convert the Image into a plain dict, or None when there is no URL."""
    if not self.url:
        return None
    return dict(url=self.url, width=self.width, height=self.height)
Convert Image into raw dictionary data.
def fetch(index, tokens):
    """Return the codes common to every token's posting set in *index*."""
    if not tokens:
        return set()
    posting_sets = (set(index.get(tok, [])) for tok in tokens)
    return set.intersection(*posting_sets)
Fetch the codes from given tokens
def read_header(self): with fopen(self.vpk_path, 'rb') as f: (self.signature, self.version, self.tree_length ) = struct.unpack("3I", f.read(3*4)) if self.signature != 0x55aa1234: raise ValueError("File is not VPK (invalid magic)") ...
Reads VPK file header from the file
def _read_temp(data): tout = StringIO() tout.write(data) tout.seek(0) output = tout.readlines() tout.close() return output
Return what would be written to disk
def stop_jobs(self, job_ids=None): self.lock() jobs = self.get_jobs(job_ids) for job in jobs: if job.status in ('executing', 'queued', 'waiting') and job.queue_name == 'local': logger.info("Reset job '%s' (%s) in the database", job.name, self._format_log(job.id)) job.submit() self....
Resets the status of the job to 'submitted' when they are labeled as 'executing'.
def cl_picard(self, command, options, memscale=None):
    """Prepare a Picard command line from (name, value) option pairs."""
    opts = ["%s=%s" % (name, value) for name, value in options]
    opts.append("VALIDATION_STRINGENCY=SILENT")
    base = self._get_picard_cmd(command, memscale=memscale)
    return base + opts
Prepare a Picard commandline.
def strip( text ):
    """Strip every Colors escape code from *text*."""
    codes = [attr for attr in Colors.__dict__.keys()
             if not attr.startswith("__") and attr != 'strip']
    for name in codes:
        text = text.replace(vars(Colors)[name], '')
    return text
Strips all color codes from a text.
def DiffArrayObjects(self, oldObj, newObj, isElementLinks=False): if oldObj == newObj: return True if not oldObj or not newObj: return False if len(oldObj) != len(newObj): __Log__.debug('DiffArrayObjects: Array lengths do not match %d != %d' % (len(oldObj), len(n...
Method which deligates the diffing of arrays based on the type
def plot_gaps(plot, columns):
    """Plot the percentage of gaps at each position."""
    from plot_window import window_plot_convolve as plot_window
    inverted = [100 - value for value in columns]
    plot_window([inverted], len(columns) * .01, plot)
plot % of gaps at each position
def getMouse(self):
    """Block until a mouse click, then return its (x, y) coordinates.

    Resets the shared click coordinates to the -1 sentinel, then polls
    every 0.1 s until another process records a click position.
    """
    self.mouse_x.value = -1
    self.mouse_y.value = -1
    while self.mouse_x.value == -1 and self.mouse_y.value == -1:
        time.sleep(.1)
    return (self.mouse_x.value, self.mouse_y.value)
Waits for a mouse click.
def _split_string_to_tokens(text): if not text: return [] ret = [] token_start = 0 is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text] for pos in xrange(1, len(text)): if is_alnum[pos] != is_alnum[pos - 1]: token = text[token_start:pos] if token != u" " or token_start == 0: ret....
Splits text to a list of string tokens.
def handle_pubrel(self):
    """Handle an incoming PUBREL packet by queueing a Pubrel event."""
    self.logger.info("PUBREL received")
    ret, mid = self.in_packet.read_uint16()
    if ret != NC.ERR_SUCCESS:
        return ret
    self.push_event(event.EventPubrel(mid))
    return NC.ERR_SUCCESS
Handle incoming PUBREL packet.
def detach(self):
    """Detach the underlying LLVM resource without disposing of it."""
    if self._closed:
        return
    del self._as_parameter_
    self._closed = True
    self._ptr = None
Detach the underlying LLVM resource without disposing of it.