async def stations(self):
    data = await self.retrieve(API_DISTRITS)
    # Field name fixed to 'idConcelho' to match the key read from the payload below.
    Station = namedtuple('Station', ['latitude', 'longitude', 'idAreaAviso',
                                     'idConcelho', 'idDistrito', 'idRegiao',
                                     'globalIdLocal', 'local'])
    _stations = []
    for station in data['data']:
        _station = Station(
            self._to_number(station['latitude']),
            self._to_number(station['longitude']),
            station['idAreaAviso'],
            station['idConcelho'],
            station['idDistrito'],
            station['idRegiao'],
            station['globalIdLocal'] // 100 * 100,
            station['local'],
        )
        _stations.append(_station)
    return _stations
Retrieve stations.
def limit_range_for_scale(self, vmin, vmax, minpos):
    vmin_bound = self._transform.transform_non_affine(0)
    vmax_bound = self._transform.transform_non_affine(self._transform.M)
    vmin = max(vmin, vmin_bound)
    vmax = min(vmax, vmax_bound)
    return vmin, vmax
Return minimum and maximum bounds for the logicle axis.

Parameters
----------
vmin : float
    Minimum data value.
vmax : float
    Maximum data value.
minpos : float
    Minimum positive value in the data. Ignored by this function.

Returns
-------
float
    Minimum axis bound.
float
    Maximum axis bound.
def populateMainMenu(self, parentMenu):
    parentMenu.addAction("Configure", self.configure)
    parentMenu.addAction("Collect garbage", self.__collectGarbage)
Populates the main menu.

The main menu looks as follows:

Plugins
    - Plugin manager (fixed item)
    - Separator (fixed item)
    - <Plugin #1 name> (this is the parentMenu passed)
    ...

If no items were populated by the plugin then the <Plugin #N name> menu
item is not shown. A plugin configuration item, if any, is best inserted
here.
def release(self, lock_transactions=None):
    self.personal_lock.release()
    self.with_count -= 1
    if lock_transactions is None:
        lock_transactions = self.lock_transactions
    if not lock_transactions:
        self.db_state.lock.release()
        return
    try:
        in_transaction = self.in_transaction
    except sqlite3.ProgrammingError:
        in_transaction = False
    if (self.was_in_transaction and not in_transaction) or not in_transaction:
        if self.with_count == 0:
            self.db_state.active_connection = None
        self.db_state.transaction_lock.release()
    self.db_state.lock.release()
Release the connection locks. :param lock_transactions: `bool`, release the transaction lock (`self.lock_transactions` is the default value)
def ida_connect(host='localhost', port=18861, retry=10):
    for i in range(retry):
        try:
            LOG.debug('Connecting to %s:%d, try %d...', host, port, i + 1)
            link = rpyc_classic.connect(host, port)
            link.eval('2 + 2')
        except socket.error:
            time.sleep(1)
            continue
        else:
            LOG.debug('Connected to %s:%d', host, port)
            return link
    raise IDALinkError("Could not connect to %s:%d after %d tries"
                       % (host, port, retry))
Connect to an instance of IDA running our server.py.

:param host: The host to connect to
:param port: The port to connect to
:param retry: How many times to try after errors before giving up
def __create_rec(*args, **kwargs):
    uid = args[0]
    kind = args[1]
    post_data = kwargs['post_data']
    try:
        TabWiki.create(
            uid=uid,
            title=post_data['title'].strip(),
            date=datetime.datetime.now(),
            cnt_html=tools.markdown2html(post_data['cnt_md']),
            time_create=tools.timestamp(),
            user_name=post_data['user_name'],
            cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md']),
            time_update=tools.timestamp(),
            view_count=1,
            kind=kind,
        )
        return True
    except Exception:  # bare except narrowed
        return False
Create the record.
def _exit_handler(self):
    if os.path.isfile(self.cleanup_file):
        with open(self.cleanup_file, "a") as myfile:
            myfile.write("rm " + self.cleanup_file + "\n")
        os.chmod(self.cleanup_file, 0o755)
    if not self._has_exit_status:
        print("Pipeline status: {}".format(self.status))
        self.fail_pipeline(Exception("Pipeline failure. See details above."))
    if self.tee:
        self.tee.kill()
This function is registered with atexit to run whenever the script completes.
A catch-all for uncaught exceptions that sets the status flag file to failed.
def update_options(self) -> None:
    options = hydpy.pub.options
    for option in self.find('options'):
        value = option.text
        if value in ('true', 'false'):
            value = value == 'true'
        setattr(options, strip(option.tag), value)
    options.printprogress = False
    options.printincolor = False
Update the |Options| object available in module |pub| with the values
defined in the `options` XML element.

>>> from hydpy.auxs.xmltools import XMLInterface
>>> from hydpy import data, pub
>>> interface = XMLInterface('single_run.xml', data.get_path('LahnH'))
>>> pub.options.printprogress = True
>>> pub.options.printincolor = True
>>> pub.options.reprdigits = -1
>>> pub.options.utcoffset = -60
>>> pub.options.ellipsis = 0
>>> pub.options.warnsimulationstep = 0
>>> interface.update_options()
>>> pub.options
Options(
    autocompile -> 1
    checkseries -> 1
    dirverbose -> 0
    ellipsis -> 0
    forcecompiling -> 0
    printprogress -> 0
    printincolor -> 0
    reprcomments -> 0
    reprdigits -> 6
    skipdoctests -> 0
    trimvariables -> 1
    usecython -> 1
    usedefaultvalues -> 0
    utcoffset -> 60
    warnmissingcontrolfile -> 0
    warnmissingobsfile -> 1
    warnmissingsimfile -> 1
    warnsimulationstep -> 0
    warntrim -> 1
    flattennetcdf -> True
    isolatenetcdf -> True
    timeaxisnetcdf -> 0
)

>>> pub.options.printprogress = False
>>> pub.options.reprdigits = 6
def _get_agent_key(self, proxy=None):
    if self._proxy is None:
        self._proxy = proxy
    if self._proxy is not None and self._proxy.has_effective_agent():
        agent_key = self._proxy.get_effective_agent_id()
    else:
        agent_key = None
    if agent_key not in self._provider_sessions:
        self._provider_sessions[agent_key] = dict()
    return agent_key
Gets an agent key for session management. Side effects: sets a new proxy
if one is passed along, and initializes the provider session map if the
agent key has not been seen before.
def get_migrations(path):
    pattern = re.compile(r"\d+_[\w\d]+")
    modules = [name for _, name, _ in pkgutil.iter_modules([path])
               if pattern.match(name)]
    return sorted(modules, key=lambda name: int(name.split("_")[0]))
In the specified directory, get all the files which match the pattern 0001_migration.py
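A usage sketch (the migrations/ directory and file names are hypothetical):

# Hypothetical layout: migrations/1_initial.py, migrations/2_add_users.py,
# migrations/10_cleanup.py, migrations/helpers.py (skipped: no numeric prefix)
print(get_migrations("migrations"))
# ['1_initial', '2_add_users', '10_cleanup'] -- numeric sort puts 10 after 2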
def private_props(obj):
    props = [item for item in dir(obj)]
    priv_props = [_PRIVATE_PROP_REGEXP.match(item) for item in props]
    call_props = [callable(getattr(obj, item)) for item in props]
    iobj = zip(props, priv_props, call_props)
    for obj_name in [prop for prop, priv, call in iobj if priv and (not call)]:
        yield obj_name
Yield private properties of an object. A private property is defined as
one that has a single underscore (:code:`_`) before its name.

:param obj: Object
:type obj: object
:returns: iterator
def RdatabasesBM(host=rbiomart_host):
    biomaRt = importr("biomaRt")
    print(biomaRt.listMarts(host=host))
Lists BioMart databases through an RPY2 connection.

:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
def attention_lm_small():
    hparams = attention_lm_base()
    hparams.num_hidden_layers = 4
    hparams.hidden_size = 512
    hparams.filter_size = 2048
    hparams.layer_prepostprocess_dropout = 0.5
    return hparams
Cheap model.

on lm1b_32k:
    45M params
    2 steps/sec on [GeForce GTX TITAN X]

Returns:
    an hparams object.
def _crop_default(x, size, row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Crop `x` to `size` pixels. `row_pct`,`col_pct` select focal point of crop."
    rows,cols = tis2hw(size)
    row_pct,col_pct = _minus_epsilon(row_pct,col_pct)
    row = int((x.size(1)-rows+1) * row_pct)
    col = int((x.size(2)-cols+1) * col_pct)
    return x[:, row:row+rows, col:col+cols].contiguous()
Crop `x` to `size` pixels. `row_pct`,`col_pct` select focal point of crop.
def get_stop_times(feed: "Feed", date: Optional[str] = None) -> DataFrame:
    f = feed.stop_times.copy()
    if date is None:
        return f
    g = feed.get_trips(date)
    return f[f["trip_id"].isin(g["trip_id"])]
Return a subset of ``feed.stop_times``.

Parameters
----------
feed : Feed
date : string
    YYYYMMDD date string restricting the output to trips active
    on the date

Returns
-------
DataFrame
    Subset of ``feed.stop_times``

Notes
-----
Assume the following feed attributes are not ``None``:

- ``feed.stop_times``
- Those used in :func:`.trips.get_trips`
def AddCampaign(self, client_customer_id, campaign_name, ad_channel_type, budget):
    self.client.SetClientCustomerId(client_customer_id)
    campaign_service = self.client.GetService('CampaignService')
    budget_id = self.AddBudget(client_customer_id, budget)
    operations = [{
        'operator': 'ADD',
        'operand': {
            'name': campaign_name,
            'status': 'PAUSED',
            'biddingStrategyConfiguration': {
                'biddingStrategyType': 'MANUAL_CPC',
                'biddingScheme': {
                    'xsi_type': 'ManualCpcBiddingScheme',
                    'enhancedCpcEnabled': 'false'
                }
            },
            'budget': {
                'budgetId': budget_id
            },
            'advertisingChannelType': ad_channel_type
        }
    }]
    campaign_service.mutate(operations)
Add a Campaign to the client account.

Args:
    client_customer_id: str Client Customer Id to use when creating Campaign.
    campaign_name: str Name of the campaign to be added.
    ad_channel_type: str Primary serving target for the campaign's ads.
    budget: str A budget amount (in micros) to use.
def visit_UnaryOperation(self, node):
    if node.op.nature == Nature.PLUS:
        return +self.visit(node.right)
    elif node.op.nature == Nature.MINUS:
        return -self.visit(node.right)
    elif node.op.nature == Nature.NOT:
        return Bool(not self.visit(node.right))
Visitor for `UnaryOperation` AST node.
def fastq_to_csv(in_file, fastq_format, work_dir):
    out_file = "%s.csv" % (os.path.splitext(os.path.basename(in_file))[0])
    out_file = os.path.join(work_dir, out_file)
    if not (os.path.exists(out_file) and os.path.getsize(out_file) > 0):
        with open(in_file) as in_handle:
            with open(out_file, "w") as out_handle:
                writer = csv.writer(out_handle)
                for rec in SeqIO.parse(in_handle, fastq_format):
                    writer.writerow([rec.id] +
                                    rec.letter_annotations["phred_quality"])
    return out_file
Convert a fastq file into a CSV of phred quality scores.
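A hypothetical invocation (file names are made up; SeqIO comes from Biopython):

# Assumes reads.fastq exists and uses Sanger-encoded qualities
csv_path = fastq_to_csv("reads.fastq", "fastq-sanger", "/tmp/work")
print(csv_path)  # /tmp/work/reads.csv -- one row per read: [id, q1, q2, ...]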
def create_readable_dir(entry, section, domain, output):
    if domain != 'viral':
        full_output_dir = os.path.join(output, 'human_readable', section, domain,
                                       get_genus_label(entry),
                                       get_species_label(entry),
                                       get_strain_label(entry))
    else:
        full_output_dir = os.path.join(output, 'human_readable', section, domain,
                                       entry['organism_name'].replace(' ', '_'),
                                       get_strain_label(entry, viral=True))
    try:
        os.makedirs(full_output_dir)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(full_output_dir):
            pass
        else:
            raise
    return full_output_dir
Create a human-readable directory to link the entry to, if needed.
def _set_es_workers(self, **kwargs):
    def make_es_worker(search_conn, es_index, es_doc_type, class_name):
        new_esbase = copy.copy(search_conn)
        new_esbase.es_index = es_index
        new_esbase.doc_type = es_doc_type
        log.info("Indexing '%s' into ES index '%s' doctype '%s'",
                 class_name.pyuri, es_index, es_doc_type)
        return new_esbase

    def additional_indexers(rdf_class):
        rtn_list = rdf_class.es_indexers()
        rtn_list.remove(rdf_class)
        return rtn_list

    self.es_worker = make_es_worker(self.search_conn,
                                    self.es_index,
                                    self.es_doc_type,
                                    self.rdf_class.__name__)
    if not kwargs.get("idx_only_base"):
        self.other_indexers = {item.__name__: make_es_worker(
                self.search_conn,
                item.es_defs.get('kds_esIndex')[0],
                item.es_defs.get('kds_esDocType')[0],
                item.__name__)
            for item in additional_indexers(self.rdf_class)}
    else:
        self.other_indexers = {}
Creates index worker instances for each class to index.

kwargs:
-------
idx_only_base[bool]: True will only index the base class
def deletefile(self, project_id, file_path, branch_name, commit_message):
    data = {
        'file_path': file_path,
        'branch_name': branch_name,
        'commit_message': commit_message
    }
    request = requests.delete(
        '{0}/{1}/repository/files'.format(self.projects_url, project_id),
        headers=self.headers, data=data, verify=self.verify_ssl,
        auth=self.auth, timeout=self.timeout)
    return request.status_code == 200
Deletes an existing file in the repository.

:param project_id: project id
:param file_path: Full path to the file. Ex. lib/class.rb
:param branch_name: The name of branch
:param commit_message: Commit message
:return: True if success, False if not
def list_collections(self, session=None, filter=None, **kwargs):
    if filter is not None:
        kwargs['filter'] = filter
    read_pref = ((session and session._txn_read_preference())
                 or ReadPreference.PRIMARY)

    def _cmd(session, server, sock_info, slave_okay):
        return self._list_collections(
            sock_info, slave_okay, session, read_preference=read_pref,
            **kwargs)

    return self.__client._retryable_read(_cmd, read_pref, session)
Get a cursor over the collections of this database.

:Parameters:
  - `session` (optional): a
    :class:`~pymongo.client_session.ClientSession`.
  - `filter` (optional): A query document to filter the list of
    collections returned from the listCollections command.
  - `**kwargs` (optional): Optional parameters of the
    `listCollections command
    <https://docs.mongodb.com/manual/reference/command/listCollections/>`_
    can be passed as keyword arguments to this method. The supported
    options differ by server version.

:Returns:
  An instance of :class:`~pymongo.command_cursor.CommandCursor`.

.. versionadded:: 3.6
def _pseudoinverse(self, A, tol=1.0e-10):
    return np.linalg.pinv(A, rcond=tol)
Compute the Moore-Penrose pseudoinverse; wraps np.linalg.pinv.

REQUIRED ARGUMENTS
    A (np KxK matrix) - the square matrix whose pseudoinverse is to be
        computed

RETURN VALUES
    Ainv (np KxK matrix) - the pseudoinverse

OPTIONAL VALUES
    tol - the tolerance (relative to the largest-magnitude singular value)
        below which singular values are not to be included in forming the
        pseudoinverse (default: 1.0e-10)

NOTES
    In previous versions of pymbar / Numpy, we wrote our own pseudoinverse
    because of a bug in Numpy.
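A quick sanity check of the wrapped call (matrix chosen arbitrarily):

import numpy as np

A = np.array([[2.0, 0.0], [0.0, 4.0]])
Ainv = np.linalg.pinv(A, rcond=1.0e-10)
print(np.allclose(A @ Ainv @ A, A))  # True -- defining property of the pseudoinverse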
def softmax_cross_entropy_with_logits(sentinel=None, labels=None, logits=None, dim=-1):
    if sentinel is not None:
        name = "softmax_cross_entropy_with_logits"
        raise ValueError("Only call `%s` with "
                         "named arguments (labels=..., logits=..., ...)" % name)
    if labels is None or logits is None:
        raise ValueError("Both labels and logits must be provided.")
    try:
        f = tf.nn.softmax_cross_entropy_with_logits_v2
    except AttributeError:
        raise RuntimeError("This version of TensorFlow is no longer supported. "
                           "See cleverhans/README.md")
    labels = tf.stop_gradient(labels)
    loss = f(labels=labels, logits=logits, dim=dim)
    return loss
Wrapper around tf.nn.softmax_cross_entropy_with_logits_v2 to handle the
deprecation warning.
def index(request, template_name='staffmembers/index.html'):
    return render_to_response(template_name,
                              {'staff': StaffMember.objects.active()},
                              context_instance=RequestContext(request))
The list of employees or staff members
def file_sign(blockchain_id, hostname, input_path, passphrase=None,
              config_path=CONFIG_PATH, wallet_keys=None):
    config_dir = os.path.dirname(config_path)
    key_info = file_key_lookup(blockchain_id, 0, hostname,
                               config_path=config_path,
                               wallet_keys=wallet_keys)
    if 'error' in key_info:
        return {'error': 'Failed to lookup signing key'}
    res = blockstack_gpg.gpg_sign(input_path, key_info, config_dir=config_dir)
    if 'error' in res:
        # Error messages fixed: this function signs, it does not encrypt.
        log.error("Failed to sign: %s" % res['error'])
        return {'error': 'Failed to sign'}
    return {'status': True, 'sender_key_id': key_info['key_id'],
            'sig': res['sig']}
Sign a file with the current blockchain ID's host key.

@config_path should be for the *client*, not blockstack-file

Return {'status': True, 'sender_key_id': ..., 'sig': ...} on success
Return {'error': ...} on error
def to_weld_vec(weld_type, ndim):
    for i in range(ndim):
        weld_type = WeldVec(weld_type)
    return weld_type
Convert multi-dimensional data to WeldVec types.

Parameters
----------
weld_type : WeldType
    WeldType of data.
ndim : int
    Number of dimensions.

Returns
-------
WeldVec
    WeldVec of 1 or more dimensions.
def download(request):
    f = FileUpload()
    f.title = request.GET['title'] or 'untitled'
    f.description = request.GET['description']
    url = urllib.unquote(request.GET['photo'])
    file_content = urllib.urlopen(url).read()
    file_name = url.split('/')[-1]
    f.save_upload_file(file_name, file_content)
    f.save()
    return HttpResponse('%s' % (f.id))
Saves image from URL and returns ID for use with AJAX script
def load_factor(ts, resolution=None, norm=None):
    if norm is None:
        norm = ts.max()
    if resolution is not None:
        ts = ts.resample(rule=resolution).mean()
    lf = ts / norm
    return lf
Calculate the ratio of input vs. norm over a given interval.

Parameters
----------
ts : pandas.Series
    timeseries
resolution : str, optional
    interval over which to calculate the ratio
    default: resolution of the input timeseries
norm : int | float, optional
    denominator of the ratio
    default: the maximum of the input timeseries

Returns
-------
pandas.Series
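A usage sketch with a made-up hourly profile:

import numpy as np
import pandas as pd

# Hypothetical hourly demand series for one day
ts = pd.Series(np.sin(np.linspace(0, np.pi, 24)) + 1,
               index=pd.date_range("2021-01-01", periods=24, freq="H"))
lf = load_factor(ts)    # every value divided by the series maximum
print(float(lf.max()))  # 1.0 -- the peak maps to a load factor of 1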
def _sort(self):
    self.__dict__['_z_ordered_sprites'] = sorted(
        self.sprites, key=lambda sprite: sprite.z_order)
sort sprites by z_order
async def pause(self):
    self.logger.debug("pause command")
    if not self.state == 'ready':
        return
    if self.streamer is None:
        return
    try:
        if self.streamer.is_playing():
            self.streamer.pause()
            self.pause_time = self.vclient.loop.time()
            self.statuslog.info("Paused")
    except Exception as e:
        logger.error(e)
Pauses playback if playing
def toggle_item(self, item, test_func, field_name=None):
    if test_func(item):
        self.add_item(item, field_name)
        return True
    else:
        self.remove_item(item, field_name)
        return False
Toggles the section based on test_func. test_func takes an item and returns a boolean. If it returns True, the item will be added to the given section. It will be removed from the section otherwise. Intended for use with items of settings.ARMSTRONG_SECTION_ITEM_MODEL. Behavior on other items is undefined.
def files(xscript=0, yscript=1, eyscript=None, exscript=None, g=None,
          plotter=xy_databoxes, paths=None, **kwargs):
    if 'delimiter' in kwargs:
        delimiter = kwargs.pop('delimiter')
    else:
        delimiter = None
    if 'filters' in kwargs:
        filters = kwargs.pop('filters')
    else:
        filters = '*.*'
    ds = _data.load_multiple(paths=paths, delimiter=delimiter, filters=filters)
    if ds is None or len(ds) == 0:
        return
    if 'title' not in kwargs:
        kwargs['title'] = _os.path.split(ds[0].path)[0]
    plotter(ds, xscript=xscript, yscript=yscript, eyscript=eyscript,
            exscript=exscript, g=g, **kwargs)
    return ds
This will load a bunch of data files, generate data based on the supplied
scripts, and then plot this data using the specified databox plotter.

xscript, yscript, eyscript, exscript
    scripts to generate x, y, and errors

g
    optional dictionary of globals

optional: filters="*.*" to set the file filters for the dialog.

**kwargs are sent to plotter()
def ensure_object_is_ordered_dict(item, title):
    assert isinstance(title, str)
    if not isinstance(item, OrderedDict):
        msg = "{} must be an OrderedDict. {} passed instead."
        raise TypeError(msg.format(title, type(item)))
    return None
Checks that the item is an OrderedDict. If not, raises TypeError.
def parse_mapreduce_yaml(contents):
    try:
        builder = yaml_object.ObjectBuilder(MapReduceYaml)
        handler = yaml_builder.BuilderHandler(builder)
        listener = yaml_listener.EventListener(handler)
        listener.Parse(contents)
        mr_info = handler.GetResults()
    except (ValueError, yaml_errors.EventError) as e:  # Python-3-safe except syntax
        raise errors.BadYamlError(e)
    if len(mr_info) < 1:
        raise errors.BadYamlError("No configs found in mapreduce.yaml")
    if len(mr_info) > 1:
        raise errors.MultipleDocumentsInMrYaml(
            "Found %d YAML documents" % len(mr_info))
    jobs = mr_info[0]
    job_names = set(j.name for j in jobs.mapreduce)
    if len(jobs.mapreduce) != len(job_names):
        raise errors.BadYamlError(
            "Overlapping mapreduce names; names must be unique")
    return jobs
Parses mapreduce.yaml file contents.

Args:
    contents: mapreduce.yaml file contents.

Returns:
    MapReduceYaml object with all the data from original file.

Raises:
    errors.BadYamlError: when contents is not a valid mapreduce.yaml file.
def enable_notifications(self, enabled=True):
    try:
        if enabled:
            self._object.StartNotify(
                reply_handler=self._enable_notifications_succeeded,
                error_handler=self._enable_notifications_failed,
                dbus_interface='org.bluez.GattCharacteristic1')
        else:
            self._object.StopNotify(
                reply_handler=self._enable_notifications_succeeded,
                error_handler=self._enable_notifications_failed,
                dbus_interface='org.bluez.GattCharacteristic1')
    except dbus.exceptions.DBusException as e:
        self._enable_notifications_failed(error=e)
Enables or disables value change notifications.

Success or failure will be notified by calls to
`characteristic_enable_notifications_succeeded` or
`enable_notifications_failed` respectively.

Each time the device notifies a new value,
`characteristic_value_updated()` of the related device will be called.
def createlabel(self, project_id, name, color):
    data = {'name': name, 'color': color}
    request = requests.post(
        '{0}/{1}/labels'.format(self.projects_url, project_id),
        data=data, verify=self.verify_ssl, auth=self.auth,
        headers=self.headers, timeout=self.timeout)
    if request.status_code == 201:
        return request.json()
    else:
        return False
Creates a new label for the given repository with the given name and color.

:param project_id: The ID of a project
:param name: The name of the label
:param color: Color of the label given in 6-digit hex notation with
    leading '#' sign (e.g. #FFAABB)
:return: the created label as JSON on success, False otherwise
def multiplot(self, f, lfilter=None, **kargs):
    d = defaultdict(list)
    for i in self.res:
        if lfilter and not lfilter(i):
            continue
        k, v = f(i)
        d[k].append(v)
    figure = plt.figure()
    ax = figure.add_axes(plt.axes())
    for i in d:
        ax.plot(d[i], **kargs)
    return figure
Uses a function that returns a label and a value for this label, then
plots all the values, label by label.
def clean_prefix(self):
    user = self.context.guild.me if self.context.guild else self.context.bot.user
    return self.context.prefix.replace(user.mention, '@' + user.display_name)
The cleaned up invoke prefix. i.e. mentions are ``@name`` instead of ``<@id>``.
def spammer_view(request):
    context = RequestContext(request, {})
    template = Template("")
    response = HttpResponse(template.render(context))
    response.set_cookie(COOKIE_KEY, value=COOKIE_SPAM, httponly=True,
                        expires=datetime.now() + timedelta(days=3650))
    if DJANGOSPAM_LOG:
        log("BLOCK RESPONSE", request.method, request.path_info,
            request.META.get("HTTP_USER_AGENT", "undefined"))
    return response
View for setting cookies on spammers.
def findhight(data, ignoret=None, threshold=20):
    time = np.sort(data['time'])
    ww = np.ones(len(time), dtype=bool)
    if ignoret:
        for (t0, t1) in ignoret:
            ww = ww & np.where((time < t0) | (time > t1), True, False)
    bins = np.round(time[ww]).astype('int')
    counts = np.bincount(bins)
    high = np.where(counts > np.median(counts) + threshold * counts.std())[0]
    return high, counts[high]
Find bad time ranges from distribution of candidates.

ignoret is a list of tuples [(t0, t1), (t2, t3)] defining ranges to ignore.
threshold is the number of standard deviations above the median of the
candidate count distribution in time.
Returns the time (in seconds) and counts for bins above threshold.
def get_advanced_search_form(self, data):
    if self.get_advanced_search_form_class():
        self._advanced_search_form = self.get_advanced_search_form_class()(
            data=data)
    return self._advanced_search_form
Hook to dynamically change the advanced search form
def make_parent_bands(self, band, child_bands):
    m = re.match(r'([pq][A-H\d]+(?:\.\d+)?)', band)
    if len(band) > 0:
        if m:
            p = str(band[0:len(band)-1])
            p = re.sub(r'\.$', '', p)
            if p is not None:
                child_bands.add(p)
                self.make_parent_bands(p, child_bands)
    else:
        child_bands = set()
    return child_bands
This will determine the grouping bands that a band belongs to, recursively.

13q21.31 ==> 13, 13q, 13q2, 13q21, 13q21.3, 13q21.31

:param band:
:param child_bands:
:return:
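Note that re.match anchors at the start of the string, so the band is
evidently passed without the leading chromosome number, despite the
13q21.31 example. A sketch assuming an instance mono of the surrounding
class:

bands = mono.make_parent_bands('q21.31', set())
print(sorted(bands))  # ['q', 'q2', 'q21', 'q21.3']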
def ping(self):
    url = self._build_url('pings', base_url=self._api)
    return self._boolean(self._post(url), 204, 404)
Ping this hook. :returns: bool
def run_marionette_script(script, chrome=False, async=False,
                          host='localhost', port=2828):
    m = DeviceHelper.getMarionette(host, port)
    m.start_session()
    if chrome:
        m.set_context(marionette.Marionette.CONTEXT_CHROME)
    if not async:
        result = m.execute_script(script)
    else:
        result = m.execute_async_script(script)
    m.delete_session()
    return result
Create a Marionette instance and run the provided script
def _schema_to_json_file_object(self, schema_list, file_obj):
    json.dump(schema_list, file_obj, indent=2, sort_keys=True)
Helper function for schema_to_json that takes a schema list and a file
object, and writes the schema list to the file object with json.dump.
def iosequence(seq):
    lines = Lines()
    lines.add(1, 'cdef public bint _%s_diskflag' % seq.name)
    lines.add(1, 'cdef public str _%s_path' % seq.name)
    lines.add(1, 'cdef FILE *_%s_file' % seq.name)
    lines.add(1, 'cdef public bint _%s_ramflag' % seq.name)
    ctype = 'double' + NDIM2STR[seq.NDIM+1]
    lines.add(1, 'cdef public %s _%s_array' % (ctype, seq.name))
    return lines
Special declaration lines for the given |IOSequence| object.
def stop(cls):
    if any(cls.streams):
        sys.stdout = cls.streams.pop(-1)
    else:
        sys.stdout = sys.__stdout__
Restore the previous stdout at the end.
def shutdown(self):
    try:
        while True:
            self._executor._work_queue.get(block=False)
    except queue.Empty:
        pass
    self._executor.shutdown()
Shuts down the scheduler and immediately ends all pending callbacks.
def from_map(map_key):
    'use resolved map as image'
    image_id = subprocess.check_output(
        ['plash', 'map', map_key]).decode().strip('\n')
    if not image_id:
        raise MapDoesNotExist('map {} not found'.format(repr(map_key)))
    return hint('image', image_id)
use resolved map as image
def set_root_logger(root_log_level, log_path=None):
    handlers = []
    console_handler = logging.StreamHandler()
    handlers.append(console_handler)
    if log_path:
        file_handler = logging.FileHandler(log_path)
        handlers.append(file_handler)
    set_logging_config(root_log_level, handlers=handlers)
    root_logger = logging.getLogger("pypyr")
    root_logger.debug(
        f"Root logger {root_logger.name} configured with level "
        f"{root_log_level}")
Set the root logger 'pypyr'. Do this before you do anything else. Run once and only once at initialization.
def __clean_rouge_args(self, rouge_args):
    if not rouge_args:
        return
    quot_mark_pattern = re.compile('"(.+)"')
    match = quot_mark_pattern.match(rouge_args)
    if match:
        cleaned_args = match.group(1)
        return cleaned_args
    else:
        return rouge_args
Remove enclosing quotation marks, if any.
def pop_density(data: CityInfo) -> str:
    if not isinstance(data, CityInfo):
        raise AttributeError(
            "Argument to pop_density() must be an instance of CityInfo")
    return no_dec(data.get_population() / data.get_area())
Calculate the population density from the data entry
def parse_changelog(args: Any) -> Tuple[str, str]:
    with open("CHANGELOG.rst", "r") as file:
        match = re.match(
            pattern=r"(.*?Unreleased\n---+\n)(.+?)(\n*[^\n]+\n---+\n.*)",
            string=file.read(),
            flags=re.DOTALL,
        )
    assert match
    header, changes, tail = match.groups()
    tag = "%s - %s" % (args.tag, datetime.date.today().isoformat())
    tagged = "\n%s\n%s\n%s" % (tag, "-" * len(tag), changes)
    if args.verbose:
        print(tagged)
    return "".join((header, tagged, tail)), changes
Return an updated changelog and the list of changes.
def resistance_distance(G):
    if sparse.issparse(G):
        L = G.tocsc()
    else:
        if G.lap_type != 'combinatorial':
            raise ValueError('Need a combinatorial Laplacian.')
        L = G.L.tocsc()
    try:
        pseudo = sparse.linalg.inv(L)
    except RuntimeError:
        pseudo = sparse.lil_matrix(np.linalg.pinv(L.toarray()))
    N = np.shape(L)[0]
    d = sparse.csc_matrix(pseudo.diagonal())
    rd = sparse.kron(d, sparse.csc_matrix(np.ones((N, 1)))).T \
        + sparse.kron(d, sparse.csc_matrix(np.ones((N, 1)))) \
        - pseudo - pseudo.T
    return rd
r""" Compute the resistance distances of a graph. Parameters ---------- G : Graph or sparse matrix Graph structure or Laplacian matrix (L) Returns ------- rd : sparse matrix distance matrix References ---------- :cite:`klein1993resistance`
def draw(self, **kwargs):
    ax = mp.gca()
    shape = matplotlib.patches.Polygon(self.polygon, **kwargs)
    ax.add_artist(shape)
Draw the polygon.

Optional Inputs:
----------------
All optional inputs are passed to ``matplotlib.patches.Polygon``

Notes:
------
Does not accept maptype as an argument.
def paren_split(sep, string):
    if len(sep) != 1:
        raise Exception("Separation string must be one character long")
    retlist = []
    level = 0
    blevel = 0
    left = 0
    for i in range(len(string)):
        if string[i] == "(":
            level += 1
        elif string[i] == ")":
            level -= 1
        elif string[i] == "[":
            blevel += 1
        elif string[i] == "]":
            blevel -= 1
        elif string[i] == sep and level == 0 and blevel == 0:
            retlist.append(string[left:i])
            left = i + 1
    retlist.append(string[left:])
    return retlist
Splits the string into pieces divided by sep, when sep is outside of parentheses.
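A quick usage sketch (input string invented for illustration):

print(paren_split(',', 'a,f(x,y),b[i,j],c'))
# ['a', 'f(x,y)', 'b[i,j]', 'c'] -- commas inside () and [] are not split points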
def top(self, body_output, features):
    if isinstance(body_output, dict):
        logits = {}
        for k, v in six.iteritems(body_output):
            with tf.variable_scope(k) as top_vs:
                self._add_variable_scope("top_%s" % k, top_vs)
                logits[k] = self._top_single(v, k, features)
        return logits
    else:
        return self._top_single(body_output, "targets", features)
Computes logits given body output and features.

Args:
    body_output: dict of str to Tensor, comprising one key-value pair for
        each target. Each value denotes the target's pre-logit
        activations. Alternatively, it may be a single Tensor denoting
        the pre-logits for that target.
    features: dict of str to Tensor. Typically it is the preprocessed data
        batch after Problem's preprocess_example().

Returns:
    logits: dict of str to Tensor, denoting each logits for each target;
        or a single Tensor denoting the logits for that target.
        When targets are generated at training time:
            logits == {
                "self_generated_targets": <generated targets tensor>
                "logits": <original logits Tensor or dict>
            }
def hierarchical(keys):
    ndims = len(keys[0])
    if ndims <= 1:
        return True
    dim_vals = list(zip(*keys))
    combinations = (zip(*dim_vals[i:i+2]) for i in range(ndims-1))
    hierarchies = []
    for combination in combinations:
        hierarchy = True
        store1 = defaultdict(list)
        store2 = defaultdict(list)
        for v1, v2 in combination:
            if v2 not in store2[v1]:
                store2[v1].append(v2)
            previous = store1[v2]
            if previous and previous[0] != v1:
                hierarchy = False
                break
            if v1 not in store1[v2]:
                store1[v2].append(v1)
        hierarchies.append(store2 if hierarchy else {})
    return hierarchies
Iterates over dimension values in keys, taking two sets of dimension
values at a time to determine whether two consecutive dimensions have a
one-to-many relationship. If they do, a mapping between the first and
second dimension values is returned. Returns a list of n-1 mappings,
between consecutive dimensions.
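A small illustration (keys invented): two dimensions where each
first-dimension value maps to several second-dimension values, so the
relationship is hierarchical and the mapping is returned:

keys = [('A', 1), ('A', 2), ('B', 3)]
print(hierarchical(keys))
# [defaultdict(<class 'list'>, {'A': [1, 2], 'B': [3]})]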
def get_not_unique_values(array):
    s = np.sort(array, axis=None)
    s = s[s[1:] == s[:-1]]
    return np.unique(s)
Returns the values that appear at least twice in array.

Parameters
----------
array : array like

Returns
-------
numpy.array
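A quick sanity check:

import numpy as np

print(get_not_unique_values(np.array([3, 1, 2, 3, 2, 2])))  # [2 3]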
def reset_headers(self):
    rows = self.rowCount()
    cols = self.columnCount()
    for r in range(rows):
        self.setVerticalHeaderItem(r, QTableWidgetItem(str(r)))
    for c in range(cols):
        self.setHorizontalHeaderItem(c, QTableWidgetItem(str(c)))
        self.setColumnWidth(c, 40)
Update the column and row numbering in the headers.
def get_form_type(self):
    for field in self.fields:
        if field.var == "FORM_TYPE" and field.type_ == FieldType.HIDDEN:
            if len(field.values) != 1:
                return None
            return field.values[0]
Extract the ``FORM_TYPE`` from the fields.

:return: ``FORM_TYPE`` value or :data:`None`
:rtype: :class:`str` or :data:`None`

Return :data:`None` if no well-formed ``FORM_TYPE`` field is found in
the list of fields.

.. versionadded:: 0.8
def to_json(self):
    web_resp = collections.OrderedDict()
    web_resp['status_code'] = self.status_code
    web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
    web_resp['data'] = self.data if self.data is not None else {}
    web_resp['errors'] = self.errors or []
    return web_resp
Shortcut for JSON response service data.

Returns:
    Dict that implements JSON interface.
def get_length(self):
    length = 0
    for i, point in enumerate(self.points):
        if i != 0:
            length += point.distance(self.points[i - 1])
    return length
Calculate and return the length of the line as a sum of lengths of all its segments. :returns: Total length in km.
def escape(self, escape_func, quote_func=quote_spaces):
    if self.is_literal():
        return escape_func(self.data)
    elif ' ' in self.data or '\t' in self.data:
        return quote_func(self.data)
    else:
        return self.data
Escape the string with the supplied function. The function is expected to take an arbitrary string, then return it with all special characters escaped and ready for passing to the command interpreter. After calling this function, the next call to str() will return the escaped string.
def create_local_scope_from_def_args(
    self, call_args, def_args, line_number, saved_function_call_index
):
    for i in range(len(call_args)):
        def_arg_local_name = def_args[i]
        def_arg_temp_name = ('temp_' + str(saved_function_call_index)
                             + '_' + def_args[i])
        local_scope_node = RestoreNode(
            def_arg_local_name + ' = ' + def_arg_temp_name,
            def_arg_local_name,
            [def_arg_temp_name],
            line_number=line_number,
            path=self.filenames[-1]
        )
        self.nodes[-1].connect(local_scope_node)
        self.nodes.append(local_scope_node)
Create the local scope before entering the body of a function call.

Args:
    call_args(list[ast.Name]): Of the call being made.
    def_args(ast_helper.Arguments): Of the definition being called.
    line_number(int): Of the def of the function call about to be entered
        into.
    saved_function_call_index(int): Unique number for each call.

Note:
    We do not need a connect_if_allowed because of the preceding call to
    save_def_args_in_temp.
def max_neg(self):
    if self.__len__() == 0:
        # Originally this *returned* the exception instance; raising is the intent.
        raise ArgumentError('empty set has no maximum negative value.')
    if self.contains(0):
        return None
    negative = [interval for interval in self.intervals if interval.right < 0]
    if len(negative) == 0:
        return None
    return numpy.max(list(map(lambda i: i.right, negative)))
Returns maximum negative value or None.
def load_fis(dir=None):
    if dir is None:
        import tkFileDialog
        try:
            dir = tkFileDialog.askdirectory()
        except Exception:  # bare except narrowed
            return
    if dir is None:
        return None
    from os.path import walk
    walk(dir, fits_list, "*.fits")
Load fits images in a directory
def get_default_currency(self) -> Commodity:
    result = None
    if self.default_currency:
        result = self.default_currency
    else:
        def_currency = self.__get_default_currency()
        self.default_currency = def_currency
        result = def_currency
    return result
returns the book default currency
def print_smart_tasks():
    print("Printing information about smart tasks")
    tasks = api(gateway.get_smart_tasks())
    if len(tasks) == 0:
        exit(bold("No smart tasks defined"))
    container = []
    for task in tasks:
        container.append(api(task).task_control.raw)
    print(jsonify(container))
Print smart tasks as JSON
def set_ghost_file(self, ghost_file):
    yield from self._hypervisor.send(
        'vm set_ghost_file "{name}" {ghost_file}'.format(
            name=self._name, ghost_file=shlex.quote(ghost_file)))
    log.info('Router "{name}" [{id}]: ghost file set to {ghost_file}'.format(
        name=self._name, id=self._id, ghost_file=ghost_file))
    self._ghost_file = ghost_file
Sets ghost RAM file.

:param ghost_file: path to ghost file
def _unescape_match(self, match):
    char = match.group(1)
    if char in self.ESCAPE_LOOKUP:
        return self.ESCAPE_LOOKUP[char]
    elif not char:
        raise KatcpSyntaxError("Escape slash at end of argument.")
    else:
        raise KatcpSyntaxError("Invalid escape character %r." % (char,))
Given an re.Match, unescape the escape code it represents.
def get_host_power_status(self):
    sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
    return GET_POWER_STATE_MAP.get(sushy_system.power_state)
Request the power state of the server.

:returns: Power State of the server, 'ON' or 'OFF'
:raises: IloError, on an error from iLO.
def share_model_ndex():
    if request.method == 'OPTIONS':
        return {}
    response = request.body.read().decode('utf-8')
    body = json.loads(response)
    stmts_str = body.get('stmts')
    stmts_json = json.loads(stmts_str)
    stmts = stmts_from_json(stmts_json["statements"])
    ca = CxAssembler(stmts)
    for n, v in body.items():
        ca.cx['networkAttributes'].append({'n': n, 'v': v, 'd': 'string'})
    ca.make_model()
    network_id = ca.upload_model(private=False)
    return {'network_id': network_id}
Upload the model to NDEX
def _ReadParserPresetsFromFile(self):
    self._presets_file = os.path.join(
        self._data_location, self._PRESETS_FILE_NAME)
    if not os.path.isfile(self._presets_file):
        raise errors.BadConfigOption(
            'No such parser presets file: {0:s}.'.format(self._presets_file))
    try:
        parsers_manager.ParsersManager.ReadPresetsFromFile(self._presets_file)
    except errors.MalformedPresetError as exception:
        raise errors.BadConfigOption(
            'Unable to read presets from file with error: {0!s}'.format(
                exception))
Reads the parser presets from the presets.yaml file.

Raises:
    BadConfigOption: if the parser presets file cannot be read.
def _set_named_args(self, **kv):
    for k in kv:
        self._body['${0}'.format(k)] = kv[k]
    return self
Set a named parameter in the query. The named field must exist in the
query itself.

:param kv: Key-Value pairs representing values within the query. These
    values should be stripped of their leading `$` identifier.
def highrisk_special_prefixes(self):
    if self._highrisk_special_prefixes is None:
        self._highrisk_special_prefixes = HighriskSpecialPrefixList(
            self._version,
            iso_code=self._solution['iso_code'],
        )
    return self._highrisk_special_prefixes
Access the highrisk_special_prefixes.

:returns: twilio.rest.voice.v1.dialing_permissions.country.highrisk_special_prefix.HighriskSpecialPrefixList
:rtype: twilio.rest.voice.v1.dialing_permissions.country.highrisk_special_prefix.HighriskSpecialPrefixList
def get_name(component):
    if six.callable(component):
        name = getattr(component, "__qualname__", component.__name__)
        return '.'.join([component.__module__, name])
    return str(component)
Attempt to get the string name of component, including module and class if applicable.
def workers(self):
    worker_keys = self.redis_client.keys("Worker*")
    workers_data = {}
    for worker_key in worker_keys:
        worker_info = self.redis_client.hgetall(worker_key)
        worker_id = binary_to_hex(worker_key[len("Workers:"):])
        workers_data[worker_id] = {
            "node_ip_address": decode(worker_info[b"node_ip_address"]),
            "plasma_store_socket": decode(
                worker_info[b"plasma_store_socket"])
        }
        if b"stderr_file" in worker_info:
            workers_data[worker_id]["stderr_file"] = decode(
                worker_info[b"stderr_file"])
        if b"stdout_file" in worker_info:
            workers_data[worker_id]["stdout_file"] = decode(
                worker_info[b"stdout_file"])
    return workers_data
Get a dictionary mapping worker ID to worker information.
def validate_cookies(session, class_name):
    if not do_we_have_enough_cookies(session.cookies, class_name):
        return False
    url = CLASS_URL.format(class_name=class_name) + '/class'
    r = session.head(url, allow_redirects=False)
    if r.status_code == 200:
        return True
    else:
        logging.debug('Stale session.')
        try:
            session.cookies.clear('.coursera.org')
        except KeyError:
            pass
        return False
Checks whether we have all the required cookies to authenticate on
class.coursera.org. Also checks for and removes a stale session.
def from_structures(structures, transformations=None, extend_collection=0):
    tstruct = [TransformedStructure(s, []) for s in structures]
    return StandardTransmuter(tstruct, transformations, extend_collection)
Alternative constructor from structures rather than TransformedStructures.

Args:
    structures: Sequence of structures
    transformations: New transformations to be applied to all structures
    extend_collection: Whether to use more than one output structure from
        one-to-many transformations. extend_collection can be a number,
        which determines the maximum branching for each transformation.

Returns:
    StandardTransmuter
def setup():
    init_tasks()
    run_hook("before_setup")
    env.run("mkdir -p %s" % (paths.get_shared_path()))
    env.run("chmod 755 %s" % (paths.get_shared_path()))
    env.run("mkdir -p %s" % (paths.get_backup_path()))
    env.run("chmod 750 %s" % (paths.get_backup_path()))
    env.run("mkdir -p %s" % (paths.get_upload_path()))
    env.run("chmod 775 %s" % (paths.get_upload_path()))
    run_hook("setup")
    run_hook("after_setup")
Creates the shared, backup, and upload directories, then fires the setup
hooks for recipes.
def restore_state(scan_codes):
    _listener.is_replaying = True
    with _pressed_events_lock:
        current = set(_pressed_events)
    target = set(scan_codes)
    for scan_code in current - target:
        _os_keyboard.release(scan_code)
    for scan_code in target - current:
        _os_keyboard.press(scan_code)
    _listener.is_replaying = False
Given a list of scan_codes ensures these keys, and only these keys, are pressed. Pairs well with `stash_state`, alternative to `restore_modifiers`.
def find_previous(element, l):
    length = len(l)
    for index, current in enumerate(l):
        if length - 1 == index:
            return current
        if index == 0:
            if element < current:
                return None
        if current <= element < l[index + 1]:
            return current
find previous element in a sorted list

>>> find_previous(0, [0])
0
>>> find_previous(2, [1, 1, 3])
1
>>> find_previous(0, [1, 2])
>>> find_previous(1.5, [1, 2])
1
>>> find_previous(3, [1, 2])
2
def endnotemap(self, cache=True):
    if self.__endnotemap is not None and cache == True:
        return self.__endnotemap
    else:
        x = self.xml(src='word/endnotes.xml')
        d = Dict()
        if x is None:
            return d
        for endnote in x.root.xpath("w:endnote", namespaces=self.NS):
            id = endnote.get("{%(w)s}id" % self.NS)
            typ = endnote.get("{%(w)s}type" % self.NS)
            d[id] = Dict(id=id, type=typ, elem=endnote)
        if cache == True:
            self.__endnotemap = d
        return d
return the endnotes from the docx, keyed to string id.
def kick(self, bound: int) -> int:
    return self._int_cmd(b'kick %d' % bound, b'KICKED')
Moves delayed and buried jobs into the ready queue and returns the number
of jobs affected. Only jobs from the currently used tube are moved.

A kick will only move jobs in a single state. If there are any buried
jobs, only those will be moved. Otherwise delayed jobs will be moved.

:param bound: The maximum number of jobs to kick.
def _normalize_timestamps(self, timestamp, intervals, config):
    rval = [timestamp]
    if intervals < 0:
        while intervals < 0:
            rval.append(config['i_calc'].normalize(timestamp, intervals))
            intervals += 1
    elif intervals > 0:
        while intervals > 0:
            rval.append(config['i_calc'].normalize(timestamp, intervals))
            intervals -= 1
    return rval
Helper for the subclasses to generate a list of timestamps.
def attach_run_command(cmd):
    if isinstance(cmd, tuple):
        return _lxc.attach_run_command(cmd)
    elif isinstance(cmd, list):
        return _lxc.attach_run_command((cmd[0], cmd))
    else:
        return _lxc.attach_run_command((cmd, [cmd]))
Run a command when attaching.

Please do not call directly, this will execvp the command. This is to be
used in conjunction with the attach method of a container.
def knuth_morris_pratt(s, t):
    sep = '\x00'
    assert sep not in t and sep not in s
    f = maximum_border_length(t + sep + s)
    n = len(t)
    for i, fi in enumerate(f):
        if fi == n:
            return i - 2 * n
    return -1
Find a substring by Knuth-Morris-Pratt

:param s: the haystack string
:param t: the needle string
:returns: index i such that s[i: i + len(t)] == t, or -1
:complexity: O(len(s) + len(t))
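The function relies on a maximum_border_length helper that is not shown in
this entry. A plausible sketch of the standard border-table computation,
consistent with the indexing used above (f[i] is the length of the longest
proper border of w[:i+1]):

def maximum_border_length(w):
    n = len(w)
    f = [0] * n                 # f[0] = 0: a single char has no proper border
    k = 0                       # length of the current border
    for i in range(1, n):
        while w[i] != w[k] and k > 0:
            k = f[k - 1]        # mismatch: fall back to the next shorter border
        if w[i] == w[k]:
            k += 1              # extend the border by one character
        f[i] = k
    return f

print(knuth_morris_pratt("abracadabra", "cad"))  # 4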
def cache_master(self, saltenv='base', cachedir=None):
    ret = []
    for path in self.file_list(saltenv):
        ret.append(
            self.cache_file(
                salt.utils.url.create(path),
                saltenv,
                cachedir=cachedir)
        )
    return ret
Download and cache all files on a master in a specified environment
def monkey_patch_migration_template(self, app, fixture_path):
    self._MIGRATION_TEMPLATE = writer.MIGRATION_TEMPLATE
    module_split = app.module.__name__.split('.')
    if len(module_split) == 1:
        module_import = "import %s\n" % module_split[0]
    else:
        module_import = "from %s import %s\n" % (
            '.'.join(module_split[:-1]),
            module_split[-1:][0],
        )
    writer.MIGRATION_TEMPLATE = writer.MIGRATION_TEMPLATE\
        .replace(
            '%(imports)s',
            "%(imports)s"
            + "\nfrom django_migration_fixture import fixture\n%s"
            % module_import
        )\
        .replace(
            '%(operations)s',
            "    migrations.RunPython(**fixture(%s, ['%s'])),\n" % (
                app.label,
                os.path.basename(fixture_path)
            ) + "%(operations)s\n"
        )
Monkey patch the django.db.migrations.writer.MIGRATION_TEMPLATE.

Monkey patching django.db.migrations.writer.MIGRATION_TEMPLATE means that
we don't have to do any complex regex or reflection. It's hacky... but
works atm.
def should_strip_auth(self, old_url, new_url):
    old_parsed = urlparse(old_url)
    new_parsed = urlparse(new_url)
    if old_parsed.hostname != new_parsed.hostname:
        return True
    if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
            and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
        return False
    changed_port = old_parsed.port != new_parsed.port
    changed_scheme = old_parsed.scheme != new_parsed.scheme
    default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
    if (not changed_scheme and old_parsed.port in default_port
            and new_parsed.port in default_port):
        return False
    return changed_port or changed_scheme
Decide whether Authorization header should be removed when redirecting
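If this is the requests library's SessionRedirectMixin.should_strip_auth,
the rules can be exercised directly on a session (URLs are examples):

import requests

s = requests.Session()
# False: http -> https upgrade on default ports keeps the header
print(s.should_strip_auth('http://example.com/a', 'https://example.com/b'))
# True: the host changed, so the header is stripped
print(s.should_strip_auth('http://example.com/a', 'http://evil.com/a'))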
def bind(self, args, kwargs):
    spec = self._spec
    resolution = self.resolve(args, kwargs)
    params = dict(zip(spec.args, resolution.slots))
    if spec.varargs:
        params[spec.varargs] = resolution.varargs
    if spec.varkw:
        params[spec.varkw] = resolution.varkw
    if spec.kwonlyargs:
        params.update(resolution.kwonlyargs)
    return params
Bind arguments and keyword arguments to the encapsulated function. Returns a dictionary of parameters (named according to function parameters) with the values that were bound to each name.
def _get_image_stream_info_for_build_request(self, build_request):
    image_stream = None
    image_stream_tag_name = None
    if build_request.has_ist_trigger():
        image_stream_tag_id = build_request.trigger_imagestreamtag
        image_stream_id, image_stream_tag_name = image_stream_tag_id.split(':')
        try:
            image_stream = self.get_image_stream(image_stream_id).json()
        except OsbsResponseException as x:
            if x.status_code != 404:
                raise
        if image_stream:
            try:
                self.get_image_stream_tag(image_stream_tag_id).json()
            except OsbsResponseException as x:
                if x.status_code != 404:
                    raise
    return image_stream, image_stream_tag_name
Return the ImageStream and ImageStreamTag name for the base_image of
build_request. If build_request is not auto instantiated, objects are not
fetched and None, None is returned.
def dssps(self):
    dssps_dict = {}
    dssp_dir = os.path.join(self.parent_dir, 'dssp')
    if not os.path.exists(dssp_dir):
        os.makedirs(dssp_dir)
    for i, mmol_file in self.mmols.items():
        dssp_file_name = '{0}.dssp'.format(os.path.basename(mmol_file))
        dssp_file = os.path.join(dssp_dir, dssp_file_name)
        if not os.path.exists(dssp_file):
            dssp_out = run_dssp(pdb=mmol_file, path=True, outfile=dssp_file)
            if len(dssp_out) == 0:
                raise Warning("dssp file {0} is empty".format(dssp_file))
        dssps_dict[i] = dssp_file
    return dssps_dict
Dict of filepaths for all dssp files associated with code.

Notes
-----
Runs dssp and writes output to files if not already present.
Also downloads mmol files if not already present.
Calls isambard.external_programs.dssp and so needs dssp to be installed.

Returns
-------
dssps_dict : dict, or None.
    Keys : int
        mmol number
    Values : str
        Filepath for the corresponding dssp file.

Raises
------
Warning
    If any of the dssp files are empty.
def get_service_url(
    self,
    block_identifier: BlockSpecification,
    service_hex_address: AddressHex,
) -> Optional[str]:
    result = self.proxy.contract.functions.urls(service_hex_address).call(
        block_identifier=block_identifier,
    )
    if result == '':
        return None
    return result
Gets the URL of a service by address. If it does not exist, return None.
def create_asset(self, asset_form):
    collection = JSONClientValidated('repository',
                                     collection='Asset',
                                     runtime=self._runtime)
    if not isinstance(asset_form, ABCAssetForm):
        raise errors.InvalidArgument('argument type is not an AssetForm')
    if asset_form.is_for_update():
        raise errors.InvalidArgument(
            'the AssetForm is for update only, not create')
    try:
        if self._forms[asset_form.get_id().get_identifier()] == CREATED:
            raise errors.IllegalState(
                'asset_form already used in a create transaction')
    except KeyError:
        raise errors.Unsupported(
            'asset_form did not originate from this session')
    if not asset_form.is_valid():
        raise errors.InvalidArgument(
            'one or more of the form elements is invalid')
    insert_result = collection.insert_one(asset_form._my_map)
    self._forms[asset_form.get_id().get_identifier()] = CREATED
    result = objects.Asset(
        osid_object_map=collection.find_one({'_id': insert_result.inserted_id}),
        runtime=self._runtime,
        proxy=self._proxy)
    return result
Creates a new ``Asset``.

arg:    asset_form (osid.repository.AssetForm): the form for this
        ``Asset``
return: (osid.repository.Asset) - the new ``Asset``
raise:  IllegalState - ``asset_form`` already used in a create
        transaction
raise:  InvalidArgument - one or more of the form elements is invalid
raise:  NullArgument - ``asset_form`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
raise:  Unsupported - ``asset_form`` did not originate from
        ``get_asset_form_for_create()``

*compliance: mandatory -- This method must be implemented.*
def index(self):
    c.pynipap_version = pynipap.__version__
    try:
        c.nipapd_version = pynipap.nipapd_version()
    except Exception:  # bare except narrowed
        c.nipapd_version = 'unknown'
    c.nipap_db_version = pynipap.nipap_db_version()
    return render('/version.html')
Display NIPAP version info
def _index_resized(self, col, old_width, new_width):
    self.table_index.setColumnWidth(col, new_width)
    self._update_layout()
Resize the corresponding column of the selected index section.
def stop_consuming(self):
    if not self.consumer_tags:
        return
    if not self.is_closed:
        for tag in self.consumer_tags:
            self.basic.cancel(tag)
    self.remove_consumer_tag()
Stop consuming messages.

:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection encountered an error.

:return: