code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def _check_configs(self): configs = set(self._find_configs()) known_configs = set(self.configs.keys()) new_configs = configs - known_configs for cfg in (known_configs - configs): self.log.debug("Compass configuration has been removed: " + cfg) del self.configs[cfg] for cfg in new_configs: self.log.debug("Found new compass configuration: " + cfg) self.configs[cfg] = CompassConfig(cfg)
Reloads the configuration files.
def verify_secret(self, form_instance, secret):
    """Verify an IPN payment over SSL using EWP.

    Flags this instance with an "Invalid secret" message when *secret*
    does not match, then persists the instance either way.
    """
    warn_untested()
    if not check_secret(form_instance, secret):
        # BUG FIX: the original wrote ``self.set_flag("...(%s)") % secret``,
        # applying the % operator to set_flag()'s return value (typically
        # None) rather than to the format string — raising TypeError the
        # moment an invalid secret was actually seen.
        self.set_flag("Invalid secret. (%s)" % secret)
    self.save()
Verifies an IPN payment over SSL using EWP.
def profile(curr_step, start_step, end_step, profile_name='profile.json', early_exit=True): if curr_step == start_step: mx.nd.waitall() mx.profiler.set_config(profile_memory=False, profile_symbolic=True, profile_imperative=True, filename=profile_name, aggregate_stats=True) mx.profiler.set_state('run') elif curr_step == end_step: mx.nd.waitall() mx.profiler.set_state('stop') logging.info(mx.profiler.dumps()) mx.profiler.dump() if early_exit: exit()
profile the program between [start_step, end_step).
def update(self, stats): all_stats = stats.getAllExportsAsDict(plugin_list=self.plugins_to_export()) if self.first_line: csv_header = ['timestamp'] csv_data = [time.strftime('%Y-%m-%d %H:%M:%S')] for plugin in self.plugins_to_export(): if isinstance(all_stats[plugin], list): for stat in all_stats[plugin]: if self.first_line: csv_header += ('{}_{}_{}'.format( plugin, self.get_item_key(stat), item) for item in stat) csv_data += itervalues(stat) elif isinstance(all_stats[plugin], dict): if self.first_line: fieldnames = iterkeys(all_stats[plugin]) csv_header += ('{}_{}'.format(plugin, fieldname) for fieldname in fieldnames) csv_data += itervalues(all_stats[plugin]) if self.first_line: self.writer.writerow(csv_header) self.first_line = False self.writer.writerow(csv_data) self.csv_file.flush()
Update stats in the CSV output file.
def check_python_matlab_architecture(bits, lib_dir): if not os.path.isdir(lib_dir): raise RuntimeError("It seem that you are using {bits} version of Python, but there's no matching MATLAB installation in {lib_dir}.".format(bits=bits, lib_dir=lib_dir))
Make sure we can find corresponding installation of Python and MATLAB.
def update(self, other, **kwargs): if other is None: return if not isinstance(other, dict): other = other.to_dict() self.__dict__.update(other, **kwargs)
A dict-like update for Struct attributes.
def read_py(self, fin_txt, get_goids_only, exclude_ungrouped, prt=sys.stdout): goids_fin = self._read_py(fin_txt, get_goids_only, exclude_ungrouped) sections = self._read_finish(goids_fin, prt) if prt is not None: self._prt_read_msg(prt, fin_txt, exclude_ungrouped) return sections
Read GO IDs or sections data from a Python file.
def _pip_search(stdout, stderr):
    """Callback for ``pip search``.

    Parses ``name - description`` lines from the captured stdout and
    returns a dict mapping package name to its description.
    """
    result = {}
    for line in to_text_string(stdout).split('\n'):
        # Blank lines cannot contain the separator, so they are simply
        # skipped (the original stripped them in an O(n^2) remove loop).
        if ' - ' in line:
            # BUG FIX: maxsplit=1 keeps descriptions that themselves
            # contain " - " intact; the original ``split(' - ')[1]``
            # silently truncated them at the second separator.
            name, description = line.split(' - ', 1)
            result[name.strip()] = description.strip()
    return result
Callback for pip search.
def listen_init(self): self.dispatcher = ObjectDispatch(self) self.factory = MsgPackProtocolFactory(self.dispatcher) self.server = UnixServer(self.loop, self.factory, self.path) self.server.start()
Setup the service to listen for clients.
def openstack_undercloud_install(self): instack_undercloud_ver, _ = self.run('repoquery --whatprovides /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp') if instack_undercloud_ver.rstrip('\n') == 'instack-undercloud-0:2.2.0-1.el7ost.noarch': LOG.warn('Workaround for BZ1298189') self.run("sed -i \"s/.*Keystone_domain\['heat_domain'\].*/Service\['keystone'\] -> Class\['::keystone::roles::admin'\] -> Class\['::heat::keystone::domain'\]/\" /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp") self.run('OS_PASSWORD=bob openstack undercloud install', user='stack') if self.run('rpm -qa openstack-ironic-api')[0].rstrip('\n') == 'openstack-ironic-api-4.2.2-3.el7ost.noarch': LOG.warn('Workaround for BZ1297796') self.run('systemctl start openstack-ironic-api.service') self.add_environment_file(user='stack', filename='stackrc') self.run('heat stack-list', user='stack')
Deploy an undercloud on the host.
def distancemodulus(d):
    """Return the distance modulus ``5 * log10(d / 10)`` for *d* in parsec.

    *d* may be a scalar, a sequence, or an astropy ``Quantity`` with a
    length unit (converted to parsec first).
    """
    if type(d) == Quantity:
        x = d.to('pc').value
    else:
        x = d
    if np.size(x) > 1:
        # BUG FIX: the original assigned ``np.atleast_1d(x)`` to the dead
        # name ``d``, so the normalised array was never used.
        x = np.atleast_1d(x)
    return 5 * np.log10(x / 10)
Returns distance modulus given d in parsec.
def xml_row(row, lang):
    """Generator for an XML row.

    Yields ``(name, (text, type))`` for each field element in *row*,
    with XML namespaces stripped from the child tag to obtain the field
    type.  ``literal`` fields additionally carry their language tag.
    """
    strip_ns = re.compile(r'\{[^}]+\}')
    for field in row:
        first_child = field[0]
        field_type = strip_ns.sub('', first_child.tag)
        if field_type == 'literal':
            field_type = '{}, {}'.format(
                field_type, first_child.attrib.get(XML_LANG, 'none'))
        yield (field.get('name'), (first_child.text, field_type))
Generator for an XML row
def visit_for(self, node): fors = "for %s in %s:\n%s" % ( node.target.accept(self), node.iter.accept(self), self._stmt_list(node.body), ) if node.orelse: fors = "%s\nelse:\n%s" % (fors, self._stmt_list(node.orelse)) return fors
return an astroid.For node as string
def _locate_file(f, base_dir): if base_dir == None: return f file_name = os.path.join(base_dir, f) real = os.path.realpath(file_name) return real
Utility method for finding full path to a filename as string
def create_ssl_context():
    """Create and return SSL Context.

    NOTE(review): hostname checking and certificate verification are
    deliberately disabled here, so connections made with this context are
    NOT authenticated — confirm this is intentional for the deployment.
    """
    context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
    # check_hostname must be cleared before verify_mode can be CERT_NONE.
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    return context
Create and return SSL Context.
def _FetchRemapping(type_name, mapping_type, python_name=None, json_name=None, mappings=None): if python_name and json_name: raise exceptions.InvalidDataError( 'Cannot specify both python_name and json_name ' 'for %s remapping' % mapping_type) if not (python_name or json_name): raise exceptions.InvalidDataError( 'Must specify either python_name or json_name for %s remapping' % ( mapping_type,)) field_remappings = mappings.get(type_name, {}) if field_remappings: if python_name: return field_remappings.get(python_name) elif json_name: if json_name in list(field_remappings.values()): return [k for k in field_remappings if field_remappings[k] == json_name][0] return None
Common code for fetching a key or value from a remapping dict.
def run_normalization(self): for index, media_file in enumerate( tqdm( self.media_files, desc="File", disable=not self.progress, position=0 )): logger.info("Normalizing file {} ({} of {})".format(media_file, index + 1, self.file_count)) media_file.run_normalization() logger.info("Normalized file written to {}".format(media_file.output_file))
Run the normalization procedures
def run(wf, *, display, n_threads=1): worker = dynamic_exclusion_worker(display, n_threads) return noodles.Scheduler(error_handler=display.error_handler)\ .run(worker, get_workflow(wf))
Run the workflow using the dynamic-exclusion worker.
def show_some(items:Collection, n_max:int=5, sep:str=','):
    "Return the representation of the first `n_max` elements in `items`."
    if items is None or len(items) == 0:
        return ''
    shown = sep.join(f'{elem}' for elem in items[:n_max])
    return shown + '...' if len(items) > n_max else shown
Return the representation of the first `n_max` elements in `items`.
def clear(self, startBlock, endBlock): for block in qutepart.iterateBlocksFrom(startBlock): self._setBlockMarked(block, False) if block == endBlock: break
Clear bookmarks on block range including start and end
def _parse_columns(self): column_map = {} for key, value in self.model.keyMap.items(): record_key = key[1:] if record_key: if self.item_key.findall(record_key): pass else: if value['value_datatype'] == 'map': continue datatype = value['value_datatype'] if value['value_datatype'] == 'number': datatype = 'float' if 'integer_data' in value.keys(): if value['integer_data']: datatype = 'integer' replace_key = '' if 'field_metadata' in value.keys(): if 'replace_key' in value['field_metadata'].keys(): if isinstance(value['field_metadata']['replace_key'], str): replace_key = value['field_metadata']['replace_key'] max_length = None if 'max_length' in value.keys(): max_length = value['max_length'] column_map[record_key] = (record_key, datatype, replace_key, max_length) return column_map
a helper method for parsing the column properties from the record schema
def url_for(**options): url_parts = get_url_parts(**options) image_hash = hashlib.md5(b(options['image_url'])).hexdigest() url_parts.append(image_hash) return "/".join(url_parts)
Returns the url for the specified options
def single(wosclient, wos_query, xml_query=None, count=5, offset=1): result = wosclient.search(wos_query, count, offset) xml = _re.sub(' xmlns="[^"]+"', '', result.records, count=1).encode('utf-8') if xml_query: xml = _ET.fromstring(xml) return [el.text for el in xml.findall(xml_query)] else: return _minidom.parseString(xml).toprettyxml()
Perform a single Web of Science query and then XML query the results.
def model(self): if self.z == 0: m = self._model else: if self._internal_wave_unit.physical_type == 'length': rs = self._redshift_model.inverse else: rs = self._redshift_model if self.z_type == 'wavelength_only': m = rs | self._model else: m = rs | self._model | self._redshift_flux_model return m
Model of the spectrum with given redshift.
def format_summary(self):
    """Generate a summary string for the progress bar.

    Joins each chunk's own summary with "/" separators.
    """
    return "/".join(
        chunk.format_chunk_summary() for chunk in self._progress_chunks
    )
Generate a summary string for the progress bar.
def cookie_signature(seed, *parts):
    """Generates a cookie signature.

    Computes an HMAC-SHA1 hex digest over *parts* keyed with *seed*;
    falsy parts (None, empty) are skipped.
    """
    mac = hmac.new(seed, digestmod=hashlib.sha1)
    for piece in filter(None, parts):
        mac.update(piece)
    return mac.hexdigest()
Generates a cookie signature.
def remove_old_dumps(connection, container: str, days=None): if not days: return if days < 20: LOG.error('A minimum of 20 backups is stored') return options = return_file_objects(connection, container) for dt, o_info in options: now = datetime.datetime.now() delta = now - dt if delta.days > days: LOG.info('Deleting %s', o_info['name']) objectstore.delete_object(connection, container, o_info)
Remove dumps older than x days
def diff(self): if not self.present: if self.existing: return DEL return NOOP is_diff = NOOP if self.present and self.existing: a_obj = self.config.copy() if self.config and diff_dict(a_obj, self.existing, True): is_diff = CHANGED if self.description != self.existing.get('description'): is_diff = CONFLICT elif self.present and not self.existing: is_diff = ADD return is_diff
Determines if changes are needed for the Vault backend
def normalize_path(): whole_path = [ os.path.abspath(path) for path in sys.path if os.path.exists(path) ] whole_set = collections.OrderedDict((("", 1), (os.getcwd(), 1))) for path in whole_path: if path not in whole_set: whole_set[path] = 1 sys.path = list(whole_set) for module_ in sys.modules.values(): try: module_.__path__ = [ os.path.abspath(path) for path in module_.__path__ if _package_exists(path) ] except AttributeError: pass except ImportError: pass
Normalizes sys.path to avoid the use of relative folders
def safe_record(ctx, item): if isinstance(item, basestring): return ctx.env.ref(item) return item
Make sure we get a record instance even if we pass an xmlid.
def _build_text_filter(self): text_filter = TextFilter(logger=self.logger) self.log(u"Created TextFilter object") for key, cls, param_name in [ ( gc.PPN_TASK_IS_TEXT_FILE_IGNORE_REGEX, TextFilterIgnoreRegex, "regex" ), ( gc.PPN_TASK_IS_TEXT_FILE_TRANSLITERATE_MAP, TextFilterTransliterate, "map_file_path" ) ]: cls_name = cls.__name__ param_value = gf.safe_get(self.parameters, key, None) if param_value is not None: self.log([u"Creating %s object...", cls_name]) params = { param_name: param_value, "logger": self.logger } try: inner_filter = cls(**params) text_filter.add_filter(inner_filter) self.log([u"Creating %s object... done", cls_name]) except ValueError as exc: self.log_exc(u"Creating %s object failed" % (cls_name), exc, False, None) return text_filter
Build a suitable TextFilter object.
def embeddedFileCount(self): if self.isClosed or self.isEncrypted: raise ValueError("operation illegal for closed / encrypted doc") return _fitz.Document_embeddedFileCount(self)
Return number of embedded files.
def int_subtype(i, bits, signed):
    """returns integer i after checking that it fits in the given number of bits.

    Raises TypeError for non-int values and ValueError when *i* lies
    outside the signed/unsigned range for the given width.
    """
    if not isinstance(i, int):
        raise TypeError("value is not int: %s" % repr(i))
    if signed:
        lo, hi = -(1 << (bits - 1)), (1 << (bits - 1)) - 1
    else:
        lo, hi = 0, (1 << bits) - 1
    if not lo <= i <= hi:
        raise ValueError(
            "%d not in range of %s %d-bit value"
            % (i, ("unsigned", "signed")[signed], bits)
        )
    return i
returns integer i after checking that it fits in the given number of bits.
def DumpCurrentSchema(cursor): cursor.execute("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES " "WHERE table_schema = (SELECT DATABASE())") defs = [] for table, in sorted(cursor.fetchall()): cursor.execute("SHOW CREATE TABLE `{}`".format(table)) rows = cursor.fetchall() defs.append(rows[0][1]) return "\n\n".join(defs)
Dumps current database schema.
def _secret_yaml(loader, node): fname = os.path.join(os.path.dirname(loader.name), "secrets.yaml") try: with open(fname, encoding="utf-8") as secret_file: secrets = YAML(typ="safe").load(secret_file) except FileNotFoundError: raise ValueError("Secrets file {} not found".format(fname)) from None try: return secrets[node.value] except KeyError: raise ValueError("Secret {} not found".format(node.value)) from None
Load secrets and embed it into the configuration YAML.
def experiment(self): if self._experiment is None: self._experiment = list(self.config.experiments.keys())[-1] return self._experiment
The identifier or the experiment that is currently processed
def main(argv): del argv engine = make_gtp_instance(FLAGS.load_file, cgos_mode=FLAGS.cgos_mode, kgs_mode=FLAGS.kgs_mode, minigui_mode=FLAGS.minigui_mode) dbg("GTP engine ready\n") for msg in sys.stdin: if not engine.handle_msg(msg.strip()): break
Run Minigo in GTP mode.
def no_content_response(response):
    "Cautious assessment of the response body for no content."
    try:
        container = response._container
    except AttributeError:
        return True
    if container is None:
        return True
    if isinstance(container, (list, tuple)) and len(container) == 1:
        # A single falsy element (e.g. empty string) counts as no content.
        return not container[0]
    return False
Cautious assessment of the response body for no content.
def setOrga(request, hproPk=None): if settings.PIAPI_STANDALONE: request.session['plugit-standalone-organame'] = request.GET.get('name') request.session['plugit-standalone-orgapk'] = request.GET.get('pk') else: (_, _, hproject) = getPlugItObject(hproPk) from organizations.models import Organization orga = get_object_or_404(Organization, pk=request.GET.get('orga')) if request.user.is_superuser or orga.isMember(request.user) or orga.isOwner(request.user): request.session['plugit-orgapk-' + str(hproject.pk)] = orga.pk return HttpResponse('')
Change the current orga
def _html(self, text): html = URL_REGEX.sub(self._parse_urls, text) html = USERNAME_REGEX.sub(self._parse_users, html) html = LIST_REGEX.sub(self._parse_lists, html) return HASHTAG_REGEX.sub(self._parse_tags, html)
Parse a Tweet and generate HTML.
def function_parser(function, parser): parser.set_defaults(func=function) help_text = inspect.getdoc(function) main_text, params_help = parser_help_text(help_text) args, varargs, keywords, defaults = inspect.getargspec(function) if args is None: args = [] if defaults is None: defaults = [] if len(args) and args[0] == 'self': args.pop(0) num_required_args = len(args) - len(defaults) for idx, arg in enumerate(args): if idx < num_required_args: arg_name, arg_params = calculate_default_type(arg, False, None, params_help) else: default_value = defaults[idx - num_required_args] arg_name, arg_params = calculate_default_type(arg, True, default_value, params_help) parser.add_argument(arg_name, **arg_params)
This function parses a function and adds its arguments to the supplied parser
def _repair_column(self): check_for_title = True for column_index in range(self.start[1], self.end[1]): table_column = TableTranspose(self.table)[column_index] column_start = table_column[self.start[0]] if check_for_title and is_empty_cell(column_start): self._stringify_column(column_index) elif (isinstance(column_start, basestring) and re.search(allregex.year_regex, column_start)): self._check_stringify_year_column(column_index) else: check_for_title = False
Same as _repair_row but for columns.
def export_serving(model_path): pred_config = PredictConfig( session_init=get_model_loader(model_path), model=InferenceOnlyModel(), input_names=['input_img_bytes'], output_names=['prediction_img_bytes']) ModelExporter(pred_config).export_serving('/tmp/exported')
Export trained model to use it in TensorFlow Serving or cloudML.
def _collect_state_names(self, variable): "Return a list of states that the variable takes in the data" states = sorted(list(self.data.ix[:, variable].dropna().unique())) return states
Return a list of states that the variable takes in the data
def ensure_unicode(text):
    """Helper to ensure that text passed to WriteConsoleW is unicode.

    Byte strings are decoded with the pyreadline codepage (falling back
    to ASCII with replacement); text that is already unicode is returned
    unchanged.
    """
    # BUG FIX: the original tested ``isinstance(text, str)`` and called
    # ``text.decode`` — valid on Python 2 (str is the byte type) but an
    # uncaught AttributeError on Python 3.  Testing for ``bytes`` keeps
    # the Python 2 behaviour and is correct on Python 3.  (The stray
    # bare ``u`` expression — a broken u\"\"\" docstring prefix — is gone.)
    if isinstance(text, bytes):
        try:
            return text.decode(pyreadline_codepage, u"replace")
        except (LookupError, TypeError):
            return text.decode(u"ascii", u"replace")
    return text
Helper to ensure that text passed to WriteConsoleW is unicode.
def load_stats(self, cache=None, wait=None): if cache is None: cache = not self.debug if wait is None: wait = self.debug if not cache or self._stats is None: self._stats = self._load_stats() start = time.time() while wait and self._stats.get('status') == 'compiling': if self.timeout and (time.time() - start > self.timeout): raise RuntimeError("Webpack {0!r} timed out while compiling" .format(self.stats_file.path)) time.sleep(0.1) self._stats = self._load_stats() return self._stats
Load and cache the webpack-stats file
def blocks(self, lines): state = markdown.blockparser.State() blocks = [] state.set('start') currblock = 0 for line in lines: line += '\n' if state.isstate('start'): if line[:3] == '```': state.set('```') else: state.set('\n') blocks.append('') currblock = len(blocks) - 1 else: marker = line[:3] if state.isstate(marker): state.reset() blocks[currblock] += line return blocks
Groups lines into markdown blocks
def _create_attach_records(self, ids, attributes): records = [] timed = self._has_pivot_column(self.created_at()) or self._has_pivot_column( self.updated_at() ) for key, value in enumerate(ids): records.append(self._attacher(key, value, attributes, timed)) return records
Create a list of records to insert into the pivot table.
def prioritize(self, item, force=False): with self.condition: if item in self.working or item in self.force: return self.queue.remove(item) if force: self.force.append(item) else: self.queue.appendleft(item) self.condition.notify_all()
Moves the item to the very left of the queue.
async def command(dev, service, method, parameters): params = None if parameters is not None: params = ast.literal_eval(parameters) click.echo("Calling %s.%s with params %s" % (service, method, params)) res = await dev.raw_command(service, method, params) click.echo(res)
Run a raw command.
def meminfo():
    """Get the amount of memory and swap, Mebibytes.

    Parses /proc/meminfo (Linux only) and returns a dict with keys
    "Mem_MiB" and "Swap_MiB".
    """
    hwinfo = {}
    # FIX: ``with`` guarantees the file is closed even if parsing raises;
    # the original leaked the handle on any exception before f.close().
    with open("/proc/meminfo") as f:
        for line in f:
            fields = line.split()
            if fields[0] == "MemTotal:":
                hwinfo["Mem_MiB"] = int(fields[1]) / 1024
            elif fields[0] == "SwapTotal:":
                hwinfo["Swap_MiB"] = int(fields[1]) / 1024
    return hwinfo
Get the amount of memory and swap, Mebibytes
def record_event(self, event): with open(self._path, 'a') as file_: file_.write(str(event) + '\n')
Records the ``KindleEvent`` `event` in the store
def _parallel_predict(estimators, estimators_features, X, n_classes, combination, estimators_weight): n_samples = X.shape[0] pred = np.zeros((n_samples, n_classes)) n_estimators = len(estimators) for estimator, features, weight in zip(estimators, estimators_features, estimators_weight): predictions = estimator.predict(X[:, features]) for i in range(n_samples): if combination == 'weighted_voting': pred[i, int(predictions[i])] += 1 * weight else: pred[i, int(predictions[i])] += 1 return pred
Private function used to compute predictions within a job.
def kill(self, jid): greenlet = self.greenlets.get(jid) if greenlet is not None: logger.warn('Lost ownership of %s' % jid) greenlet.kill()
Stop the greenlet processing the provided jid
def process_environment_settings(default_dictionary: dict, settings: typing.Optional[dict]=None, presets: typing.Optional[dict]=None): settings = settings if settings is not None else {} presets = presets if presets is not None else {} env_keys = sorted(set(default_dictionary.keys()) | set(presets.keys())) result_dict = {} for key in env_keys: if key in default_dictionary: new_dict = default_dictionary[key].copy() else: new_dict = {} new_dict.update(settings) if key in presets: new_dict.update(presets[key]) result_dict[key] = new_dict return result_dict
Process a dictionary of env settings
def _log_app_data(self): if self.install_json: app_commit_hash = self.install_json.get('commitHash') app_features = ','.join(self.install_json.get('features', [])) app_min_ver = self.install_json.get('minServerVersion', 'N/A') app_name = self.install_json.get('displayName') app_runtime_level = self.install_json.get('runtimeLevel') app_version = self.install_json.get('programVersion') self.log.info(u'App Name: {}'.format(app_name)) if app_features: self.log.info(u'App Features: {}'.format(app_features)) self.log.info(u'App Minimum ThreatConnect Version: {}'.format(app_min_ver)) self.log.info(u'App Runtime Level: {}'.format(app_runtime_level)) self.log.info(u'App Version: {}'.format(app_version)) if app_commit_hash is not None: self.log.info(u'App Commit Hash: {}'.format(app_commit_hash))
Log the App data information.
def sorted(self, wantdirs=False): def add_dir(dirs, d): dirs.add(d) logger.debug('add_dir added %s', d) if d != self.base: parent, _ = os.path.split(d) assert parent not in ('', '/') add_dir(dirs, parent) result = set(self.files) if wantdirs: dirs = set() for f in result: add_dir(dirs, os.path.dirname(f)) result |= dirs return [os.path.join(*path_tuple) for path_tuple in sorted(os.path.split(path) for path in result)]
Return sorted files in directory order
def update_rbac_policy(self, rbac_policy_id, body=None): return self.put(self.rbac_policy_path % rbac_policy_id, body=body)
Update a RBAC policy.
def plantloopfieldlists(data): objkey = 'plantloop'.upper() numobjects = len(data.dt[objkey]) return [[ 'Name', 'Plant Side Inlet Node Name', 'Plant Side Outlet Node Name', 'Plant Side Branch List Name', 'Demand Side Inlet Node Name', 'Demand Side Outlet Node Name', 'Demand Side Branch List Name']] * numobjects
return the plantloopfield list
def _compute_dlt(self): res = super()._compute_dlt() for rec in self: ltaf_to_apply = self.env['ddmrp.adjustment'].search( rec._ltaf_to_apply_domain()) if ltaf_to_apply: ltaf = 1 values = ltaf_to_apply.mapped('value') for val in values: ltaf *= val prev = rec.dlt rec.dlt *= ltaf _logger.debug( "LTAF=%s applied to %s. DLT: %s -> %s" % (ltaf, rec.name, prev, rec.dlt)) return res
Apply Lead Time Adj Factor if existing
def load(self, fileobj): for loader in (pickle.load, json.load, csv.reader): fileobj.seek(0) try: return self.initial_update(loader(fileobj)) except Exception as e: pass raise ValueError('File not in a supported format')
Load the dict from the file object
def _lob_start_handler(c, ctx): assert c == _OPEN_BRACE c, self = yield trans = ctx.immediate_transition(self) quotes = 0 while True: if c in _WHITESPACE: if quotes > 0: _illegal_character(c, ctx) elif c == _DOUBLE_QUOTE: if quotes > 0: _illegal_character(c, ctx) ctx.set_ion_type(IonType.CLOB).set_unicode(quoted_text=True) yield ctx.immediate_transition(_short_string_handler(c, ctx)) elif c == _SINGLE_QUOTE: if not quotes: ctx.set_ion_type(IonType.CLOB).set_unicode(quoted_text=True) quotes += 1 if quotes == 3: yield ctx.immediate_transition(_long_string_handler(c, ctx)) else: yield ctx.immediate_transition(_blob_end_handler(c, ctx)) c, _ = yield trans
Handles tokens that begin with two open braces.
def combination_memo(n, r):
    """This function calculates nCr using memoization method."""
    cache = {}

    def helper(m, k):
        # Base cases: C(m, 0) == C(m, m) == 1.
        if k == 0 or m == k:
            return 1
        key = (m, k)
        if key not in cache:
            # Pascal's rule: C(m, k) = C(m-1, k-1) + C(m-1, k).
            cache[key] = helper(m - 1, k - 1) + helper(m - 1, k)
        return cache[key]

    return helper(n, r)
This function calculates nCr using memoization method.
def _is_broken_ref(key1, value1, key2, value2): if key1 != 'Link' or key2 != 'Str': return False n = 0 if _PANDOCVERSION < '1.16' else 1 if isinstance(value1[n][0]['c'], list): return False s = value1[n][0]['c'] + value2 return True if _REF.match(s) else False
True if this is a broken reference; False otherwise.
def _to_solr(self, data): return self._dest.index_json(self._dest_coll, json.dumps(data,sort_keys=True))
Sends data to a Solr instance.
def defvar(varname): if 'pyraf' in sys.modules: from pyraf import iraf else: iraf = None if iraf: _irafdef = iraf.envget(varname) else: _irafdef = 0 return varname in _varDict or varname in os.environ or _irafdef
Returns true if CL variable is defined.
def _get_agg_font(self, prop): if __debug__: verbose.report('RendererAgg._get_agg_font', 'debug-annoying') key = hash(prop) font = RendererAgg._fontd.get(key) if font is None: fname = findfont(prop) font = RendererAgg._fontd.get(fname) if font is None: font = FT2Font( str(fname), hinting_factor=rcParams['text.hinting_factor']) RendererAgg._fontd[fname] = font RendererAgg._fontd[key] = font font.clear() size = prop.get_size_in_points() font.set_size(size, self.dpi) return font
Get the font for text instance t, cacheing for efficiency
def to_json(self): result = super(Space, self).to_json() result.update({'name': self.name}) return result
Returns the JSON representation of the space.
def _get_metadata_as_string(self): metalist = [] for metaname, meta in iteritems(self.metadata): message = "Single value in metadata dictionary should be a list!" assert isinstance(meta, list), message for data in meta: if data: metalist.append("!%s_%s = %s" % (self.geotype.capitalize(), metaname, data)) return "\n".join(metalist)
Get the metadata as SOFT formatted string.
def _repr_values(self): def getattr_better(obj, field): try: return getattr(obj, field) except AttributeError as e: try: return getattr(obj, '_' + field) except AttributeError: raise e return (getattr_better(self, attr) for attr in self._repr_attributes)
Return values that are to be shown in repr string.
def extract(self, file_path): import tarfile print('Extracting {}'.format(file_path)) if not os.path.exists(self.extracted_data_directory): os.makedirs(self.extracted_data_directory) def track_progress(members): sys.stdout.write('.') for member in members: yield member with tarfile.open(file_path) as tar: tar.extractall(path=self.extracted_data_directory, members=track_progress(tar)) self.chatbot.logger.info('File extracted to {}'.format(self.extracted_data_directory)) return True
Extract a tar file at the specified file path.
def init(self): self.target.halt() self.target.reset_and_halt() result = self._call_function_and_wait(self.flash_algo['pc_init'], init=True) if result != 0: logging.error('init error: %i', result) self.erase_sector(0x01000000) time.sleep(.5) self.target.dp.reset() time.sleep(1.3) self.target.dp.init() self.target.dp.power_up_debug() self.target.halt() self.target.reset_and_halt() result = self._call_function_and_wait(self.flash_algo['pc_init'], init=True) if result != 0: logging.error('init error: %i', result)
Download the flash algorithm in RAM
def extern_store_utf8(self, context_handle, utf8_ptr, utf8_len): c = self._ffi.from_handle(context_handle) return c.to_value(self._ffi.string(utf8_ptr, utf8_len).decode('utf-8'))
Given a context and UTF8 bytes, return a new Handle to represent the content.
def start_process(self, key): if key in self.processes and key in self.paused: os.killpg(os.getpgid(self.processes[key].pid), signal.SIGCONT) self.queue[key]['status'] = 'running' self.paused.remove(key) return True elif key not in self.processes: if self.queue[key]['status'] in ['queued', 'stashed']: self.spawn_new(key) return True return False
Start a specific processes.
def remove_callback(obj, handle): callbacks = obj._callbacks if callbacks is handle: obj._callbacks = None elif isinstance(callbacks, dllist): callbacks.remove(handle) if not callbacks: obj._callbacks = None
Remove a callback from an object.
def constraints(self, chunk): a = [self._map1[w.index] for w in chunk.words if w.index in self._map1] b = []; [b.append(constraint) for constraint in a if constraint not in b] return b
Returns a list of constraints that match the given Chunk.
def inverse(self): if self._inverse is None: self._inverse = InverseTransform(self) return self._inverse
The inverse of this transform.
def _set_data(self, data): if type(data) == bytearray: self._data = data elif type(data) == str: if sys.version_info < (3,): self._data = bytearray(data) else: self._data = bytearray(data.encode('ISO-8859-1')) elif type(data) == list or type(data) == tuple: self._data = bytearray(data) elif sys.version_info >= (3,) and type(data) == bytes: self._data = bytearray(data) else: raise Exception('Data must be bytearray, string, list or tuple,' ' not {}'.format(type(data)))
Set the packet data
def ip_registrant_monitor(self, query, days_back=0, search_type="all", server=None, country=None, org=None, page=1, include_total_count=False, **kwargs): return self._results('ip-registrant-monitor', '/v1/ip-registrant-monitor', query=query, days_back=days_back, search_type=search_type, server=server, country=country, org=org, page=page, include_total_count=include_total_count, **kwargs)
Query based on free text query terms
def deploy(verbose, app): config = PsiturkConfig() config.load_config() config.set("Experiment Configuration", "mode", "deploy") config.set("Server Parameters", "logfile", "-") config.set("Shell Parameters", "launch_in_sandbox_mode", "false") deploy_sandbox_shared_setup(verbose=verbose, app=app)
Deploy app using Heroku to MTurk.
def decode_signature(sigb64):
    """Decode a base64-encoded 64-byte signature into (r, s) integers.

    Raises ValueError if the decoded signature is not exactly 64 bytes.
    """
    sig_bin = base64.b64decode(sigb64)
    if len(sig_bin) != 64:
        raise ValueError("Invalid base64 signature")
    # BUG FIX: ``bytes.encode('hex')`` is Python-2-only and raises
    # AttributeError on Python 3.  int.from_bytes performs the same
    # big-endian conversion directly, without the hex round-trip.
    sig_r = int.from_bytes(sig_bin[:32], "big")
    sig_s = int.from_bytes(sig_bin[32:], "big")
    return sig_r, sig_s
Decode a signature into r, s
def currentVersion(self): if self._currentVersion is None: self.__init(self._url) return self._currentVersion
returns the current version of the site
def _is_complex(pe): val = isinstance(pe, _bp('Complex')) or \ isinstance(pe, _bpimpl('Complex')) return val
Return True if the physical entity is a complex
def normalise_key(self, key): key = key.replace('-', '_') if key.startswith("noy_"): key = key[4:] return key
Make sure key is a valid python attribute
def start_batch(job, input_args): samples = parse_sra(input_args['sra']) job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30')
This function will administer 5 jobs at a time then recursively call itself until subset is empty
def annotatedcore(self): logging.info('Calculating annotated core') self.total_core() for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': sample[self.analysistype].coreset = set() if sample.general.referencegenus == 'Escherichia': self.runmetadata.samples.append(sample) try: report = sample[self.analysistype].report self.blastparser(report=report, sample=sample, fieldnames=self.fieldnames) except KeyError: sample[self.analysistype].coreset = list() self.reporter()
Calculates the core genome of organisms using custom databases
def period( start, end, absolute=False ): return Period(start, end, absolute=absolute)
Create a Period instance.
def _save_cache(self): safe_makedirs(self.cache_dir) try: with open(self.cache_file, 'wb') as f: pickle.dump(self.data, f) except Exception as e: logger.error("Cannot write version to cache file {} ({})".format(self.cache_file, e))
Save data to the cache file.
def GoZero(self, speed): ' Go to Zero position ' self.ReleaseSW() spi.SPI_write_byte(self.CS, 0x82 | (self.Dir & 1)) spi.SPI_write_byte(self.CS, 0x00) spi.SPI_write_byte(self.CS, speed) while self.IsBusy(): pass time.sleep(0.3) self.ReleaseSW()
Go to Zero position
def _handle_single_tag_end(self): stack = self._stack depth = 1 for index, token in enumerate(stack[2:], 2): if isinstance(token, tokens.TagOpenOpen): depth += 1 elif isinstance(token, tokens.TagCloseOpen): depth -= 1 if depth == 0: break elif isinstance(token, tokens.TagCloseSelfclose): depth -= 1 if depth == 0: raise ParserError( "_handle_single_tag_end() got an unexpected " "TagCloseSelfclose") else: raise ParserError("_handle_single_tag_end() missed a TagCloseOpen") padding = stack[index].padding stack[index] = tokens.TagCloseSelfclose(padding=padding, implicit=True) return self._pop()
Handle the stream end when inside a single-supporting HTML tag.
def match(self, *args):
    """Whether or not to enter a given case statement"""
    # Once fallen through, stay fallen; an empty args list is the
    # default case and always matches.
    if not self.fall:
        self.fall = not args or (self.value in args)
    return self.fall
Whether or not to enter a given case statement
def read(self): if self.lines and self.chunksize: obj = concat(self) elif self.lines: data = to_str(self.data) obj = self._get_object_parser( self._combine_lines(data.split('\n')) ) else: obj = self._get_object_parser(self.data) self.close() return obj
Read the whole JSON input into a pandas object.
def api_notifications(): event_type = request.values['Event.1.EventType'] assignment_id = request.values['Event.1.AssignmentId'] db.logger.debug('rq: Queueing %s with id: %s for worker_function', event_type, assignment_id) q.enqueue(worker_function, event_type, assignment_id, None) db.logger.debug('rq: Submitted Queue Length: %d (%s)', len(q), ', '.join(q.job_ids)) return success_response(request_type="notification")
Receive MTurk REST notifications.
def _parse_certificate(cls, response): links = _parse_header_links(response) try: cert_chain_uri = links[u'up'][u'url'] except KeyError: cert_chain_uri = None return ( response.content() .addCallback( lambda body: messages.CertificateResource( uri=cls._maybe_location(response), cert_chain_uri=cert_chain_uri, body=body)) )
Parse a response containing a certificate resource.
def process_exception(self, request, exception): log_format = self._get_log_format(request) if log_format is None: return params = self._get_parameters_from_request(request, True) params['message'] = exception params['http_status'] = '-' self.OPERATION_LOG.info(log_format, params)
Log error info when exception occurred.
def start(cls): if cls._thread is None: cls._thread = threading.Thread(target=cls._run, name="Heartbeat") cls._thread.daemon = True cls._thread.start()
Start background thread if not already started
def open_fd(cls, name):
    """Open a file or create it.

    First attempts an exclusive create; if the file already exists it is
    opened read-write.  Any other OSError propagates.
    """
    try:
        return os.open(name, os.O_CREAT | os.O_RDWR | os.O_EXCL)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    # FIX: O_EXCL without O_CREAT is unspecified by POSIX (ignored on
    # Linux), so the portable equivalent of the fallback is a plain
    # read-write open.
    return os.open(name, os.O_RDWR)
Open a file or create it.
def format(self, formatstring, *args): if self.incoming_section: self.SendMessage(['s', {'name': args}]) self.incoming_section = False
Presentation Information from the Plugin
def count_elements_exactly_by_selector(self, number, selector): elems = find_elements_by_jquery(world.browser, selector) number = int(number) if len(elems) != number: raise AssertionError("Expected {} elements, found {}".format( number, len(elems)))
Assert n elements exist matching the given selector.
def iterdirty(self): return iter(chain(itervalues(self._new), itervalues(self._modified)))
Ordered iterator over dirty elements.