code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _on_process_error(self, error):
    """Logs process error.

    Unknown error codes fall back to the generic ``-1`` entry of
    ``PROCESS_ERROR_STRING``; nothing is logged when
    ``self._prevent_logs`` is set.
    """
    if self is None:  # defensive guard kept from original; presumably protects a deleted wrapper — confirm
        return
    key = error if error in PROCESS_ERROR_STRING else -1
    if self._prevent_logs:
        return
    _logger().warning(PROCESS_ERROR_STRING[key])
def _is_ipv4_like(s):
parts = s.split('.')
if len(parts) != 4:
return False
for part in parts:
try:
int(part)
except ValueError:
return False
return True | Find if a string superficially looks like an IPv4 address.
AWS documentation plays it fast and loose with this; in other
regions, it seems like even non-valid IPv4 addresses (in
particular, ones that possess decimal numbers out of range for
IPv4) are rejected. |
def has_object_permission(self, request, view, obj):
user = request.user
if not user.is_superuser and not user.is_anonymous():
valid = False
try:
ct = ContentType.objects.get_for_model(obj)
fpm = FilterPermissionModel.objects.get(user=user,
... | check filter permissions |
def sort_untl(self, sort_structure):
    """Sort the UNTL Python object by the index of a sort structure
    pre-ordered list.
    """
    def _rank(child):
        # Position of the child's tag inside the caller-supplied ordering.
        return sort_structure.index(child.tag)

    self.children.sort(key=_rank)
def normalise_rows(matrix):
    """Scales all rows to length 1. Fails when row is 0-length, so it
    leaves these unchanged.
    """
    norms = np.apply_along_axis(np.linalg.norm, 1, matrix)
    # Zero-length rows would divide by zero; dividing by 1 instead
    # leaves them exactly as they were.
    norms[norms == 0] = 1
    return matrix / norms[:, np.newaxis]
def upload(self, src_file_path, dst_file_name=None):
    """Upload the specified file to the server.

    :param src_file_path: path of the local file to send.
    :param dst_file_name: optional remote name for the file.
    :return: the response payload from the REST layer (the status
        portion of the reply is discarded).
    """
    self._check_session()
    _status, payload = self._rest.upload_file(
        'files', src_file_path, dst_file_name)
    return payload
def get_dep(self, name: str) -> dict:
    """Return the dependency entry identified with ``name``.

    Note: the historical docstring claimed a UUID string was returned,
    but this method actually returns the full dependency mapping (the
    element of ``self.meta["dependencies"]`` whose ``"model"`` equals
    ``name``); callers read the uuid from that mapping.  The return
    annotation has been corrected accordingly.

    :param name: model name of the dependency to look up.
    :return: the matching dependency dict.
    :raises KeyError: if no dependency matches ``name``.
    """
    deps = self.meta["dependencies"]
    for dep in deps:
        if dep["model"] == name:
            return dep
    raise KeyError("%s not found in %s." % (name, deps))
def returnIndexList(self, limit=False):
    """Return a list of integers that are list-index references to the
    original list of dictionaries.

    :param limit: maximum number of indices to return.  The default
        ``False`` (which ``0`` also matches via ``==``) returns the
        whole index list.
    """
    if limit == False:  # noqa: E712 -- sentinel semantics: False *or* 0 means "no limit"
        return self.index_track
    # Only indices backed by an actual table entry are returned.
    return [self.index_track[i]
            for i in range(limit) if i < len(self.table)]
def fetch_import_ref_restriction(self,):
    """Fetch whether importing the reference is restricted

    :returns: True, if importing the reference is restricted
    :rtype: :class:`bool`
    :raises: None
    """
    refobjinter = self.get_refobjinter()
    # Anything that is neither loaded nor unloaded cannot be imported.
    if self.status() not in (self.LOADED, self.UNLOADED):
        return True
    return refobjinter.fetch_action_restriction(self, 'import_reference')
def reverse_timezone(self, query, timeout=DEFAULT_SENTINEL):
ensure_pytz_is_installed()
try:
lat, lng = self._coerce_point_to_string(query).split(',')
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
"lat": lat,
... | Find the timezone for a point in `query`.
GeoNames always returns a timezone: if the point being queried
doesn't have an assigned Olson timezone id, a ``pytz.FixedOffset``
timezone is used to produce the :class:`geopy.timezone.Timezone`.
.. versionadded:: 1.18.0
:param query: ... |
def _compute(self):
newstate = self._implicit_solver()
adjustment = {}
tendencies = {}
for name, var in self.state.items():
adjustment[name] = newstate[name] - var
tendencies[name] = adjustment[name] / self.timestep
self.adjustment = adjustment
sel... | Computes the state variable tendencies in time for implicit processes.
To calculate the new state the :func:`_implicit_solver()` method is
called for daughter classes. This however returns the new state of the
variables, not just the tendencies. Therefore, the adjustment is
calculated w... |
def decompose_nfkd(text):
    """Perform unicode compatibility decomposition.

    This will replace some non-standard value representations in unicode and
    normalise them, while also separating characters and their diacritics into
    two separate codepoints.
    """
    if text is None:
        return None
    # Build the ICU transliterator once and memoize it on the function.
    transliterator = getattr(decompose_nfkd, '_tr', None)
    if transliterator is None:
        transliterator = Transliterator.createInstance('Any-NFKD')
        decompose_nfkd._tr = transliterator
    return transliterator.transliterate(text)
def scheduled(wait=False):
    """Run crawlers that are due.

    :param wait: when truthy, keep polling forever, sleeping
        ``settings.SCHEDULER_INTERVAL`` seconds between passes.
    """
    # Always perform at least one pass, even when not waiting.
    manager.run_scheduled()
    while wait:
        # NOTE: the loop runs another pass *before* each sleep, so with
        # wait=True two passes happen back-to-back at startup.
        manager.run_scheduled()
        time.sleep(settings.SCHEDULER_INTERVAL)
def logp_partial_gradient(self, variable, calculation_set=None):
if (calculation_set is None) or (self in calculation_set):
if not datatypes.is_continuous(variable):
return zeros(shape(variable.value))
if variable is self:
try:
gradient... | Calculates the partial gradient of the posterior of self with respect to variable.
Returns zero if self is not in calculation_set. |
def pay(self, predecessor):
    """Pay the predecessor for its contribution to this match set.

    If the predecessor is not None, gives the appropriate amount of
    payoff to the predecessor in payment for its contribution to this
    match set's expected future payoff. The predecessor argument should
    be either None or a MatchSet instance whose selected action led
    directly to this match set.
    """
    assert predecessor is None or isinstance(predecessor, MatchSet)
    if predecessor is None:
        return
    predecessor.payoff += self._algorithm.get_future_expectation(self)
def register_factory(self, key, factory=_sentinel, scope=NoneScope, allow_overwrite=False):
if factory is _sentinel:
return functools.partial(self.register_factory, key, scope=scope, allow_overwrite=allow_overwrite)
if not allow_overwrite and key in self._providers:
raise KeyErro... | Creates and registers a provider using the given key, factory, and scope.
Can also be used as a decorator.
:param key: Provider key
:type key: object
:param factory: Factory callable
:type factory: callable
:param scope: Scope key, factory, or instance
:type sco... |
def source(self, fields=None, **kwargs):
s = self._clone()
if fields and kwargs:
raise ValueError("You cannot specify fields and kwargs at the same time.")
if fields is not None:
s._source = fields
return s
if kwargs and not isinstance(s._source, dict)... | Selectively control how the _source field is returned.
:arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes
If ``fields`` is None, the entire document will be returned for
each hit. If fields is a dictionary with keys of 'include' and/or
'exclude' t... |
def getMemoryStats(self):
    """Return JVM Memory Stats for Apache Tomcat Server.

    @return: Dictionary of memory utilization stats.
    """
    if self._statusxml is None:
        # Lazily fetch the status XML on first use.
        self.initStats()
    memory_node = self._statusxml.find('jvm/memory')
    if memory_node is None:
        return {}
    return {key: util.parse_value(val)
            for (key, val) in memory_node.items()}
def generate_csv(src, out):
writer = UnicodeWriter(open(out, 'wb'), delimiter=';')
writer.writerow(('Reference ID', 'Created', 'Origin', 'Subject'))
for cable in cables_from_source(src, predicate=pred.origin_filter(pred.origin_germany)):
writer.writerow((cable.reference_id, cable.created, cable.orig... | \
Walks through `src` and generates the CSV file `out` |
def before_request(request, tracer=None):
if tracer is None:
tracer = opentracing.tracer
tags_dict = {
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_URL: request.full_url,
}
remote_ip = request.remote_ip
if remote_ip:
tags_dict[tags.PEER_HOST_IPV4] = remote_ip
... | Attempts to extract a tracing span from incoming request.
If no tracing context is passed in the headers, or the data
cannot be parsed, a new root span is started.
:param request: HTTP request with `.headers` property exposed
that satisfies a regular dictionary interface
:param tracer: optional... |
def _sections_to_variance_sections(self, sections_over_time):
variance_sections = []
for i in range(len(sections_over_time[0])):
time_sections = [sections[i] for sections in sections_over_time]
variance = np.var(time_sections, axis=0)
variance_sections.append(variance)
return variance_sect... | Computes the variance of corresponding sections over time.
Returns:
a list of np arrays. |
def add_locations(self, locations):
    """Add extra locations to AstralGeocoder.

    Extra locations can be

    * A single string containing one or more locations separated by a newline.
    * A list of strings
    * A list of lists/tuples that are passed to a :class:`Location` constructor
    """
    if isinstance(locations, (str, ustr)):
        self._add_from_str(locations)
        return
    if isinstance(locations, (list, tuple)):
        self._add_from_list(locations)
    # Any other type is silently ignored, matching historical behaviour.
def copy_node(node):
    """Copy a node but keep its annotations intact."""
    if not isinstance(node, gast.AST):
        # A list of nodes: copy each element recursively.
        return [copy_node(child) for child in node]
    duplicate = copy.deepcopy(node)
    # Re-attach a shallow copy of the annotation dict so the duplicate
    # does not share annotation storage with the original.
    annotations = getattr(node, anno.ANNOTATION_FIELD, {}).copy()
    setattr(duplicate, anno.ANNOTATION_FIELD, annotations)
    return duplicate
def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES):
    """Factory to create a mutation batcher associated with this instance.

    :param flush_count: maximum number of mutations to batch before flushing.
    :param max_row_bytes: maximum byte size of mutations to batch before
        flushing.
    :return: a :class:`MutationsBatcher` bound to this table.
    """
    batcher = MutationsBatcher(self, flush_count, max_row_bytes)
    return batcher
def create_node_rating_counts_settings(sender, **kwargs):
    """create node rating count and settings

    Signal handler: when a node instance is newly created, schedule the
    asynchronous creation of its rating-count and participation-settings
    related objects.
    """
    created = kwargs['created']
    node = kwargs['instance']
    if not created:
        return
    create_related_object.delay(NodeRatingCount, {'node': node})
    create_related_object.delay(NodeParticipationSettings, {'node': node})
def G(self, ID, lat, lon):
eqM = utils.eqCoords(lon, lat)
eqZ = eqM
if lat != 0:
eqZ = utils.eqCoords(lon, 0)
return {
'id': ID,
'lat': lat,
'lon': lon,
'ra': eqM[0],
'decl': eqM[1],
'raZ': eqZ[0],
... | Creates a generic entry for an object. |
def _cb_inform_sensor_status(self, msg):
timestamp = msg.arguments[0]
num_sensors = int(msg.arguments[1])
assert len(msg.arguments) == 2 + num_sensors * 3
for n in xrange(num_sensors):
name = msg.arguments[2 + n * 3]
status = msg.arguments[3 + n * 3]
v... | Update received for an sensor. |
def get_fd(file_or_fd, default=None):
    """Helper function for getting a file descriptor.

    Accepts a raw descriptor, a file-like object (its ``fileno()`` is
    used), or None (falls back to *default*, which itself may be a
    file-like object).
    """
    candidate = default if file_or_fd is None else file_or_fd
    if hasattr(candidate, "fileno"):
        return candidate.fileno()
    return candidate
def add_widget(self, widget, column=0):
if self._frame is None:
raise RuntimeError("You must add the Layout to the Frame before you can add a Widget.")
self._columns[column].append(widget)
widget.register_frame(self._frame)
if widget.name in self._frame.data:
widg... | Add a widget to this Layout.
If you are adding this Widget to the Layout dynamically after starting to play the Scene,
don't forget to ensure that the value is explicitly set before the next update.
:param widget: The widget to be added.
:param column: The column within the widget for ... |
def generate_source_image(source_file, processor_options, generators=None,
fail_silently=True):
processor_options = ThumbnailOptions(processor_options)
was_closed = getattr(source_file, 'closed', False)
if generators is None:
generators = [
utils.dynamic_import(... | Processes a source ``File`` through a series of source generators, stopping
once a generator returns an image.
The return value is this image instance or ``None`` if no generators
return an image.
If the source file cannot be opened, it will be set to ``None`` and still
passed to the generators. |
def set_element(self, row, col, value):
    """Sets the float value at the specified location.

    :param row: the 0-based index of the row
    :type row: int
    :param col: the 0-based index of the column
    :type col: int
    :param value: the float value for that cell
    :type value: float
    """
    # Delegate to the wrapped Java matrix object; "(IID)V" is the JNI
    # signature (int, int, double) -> void.
    javabridge.call(self.jobject, "setElement", "(IID)V",
                    row, col, value)
def update(self, **kwargs):
data = kwargs.get('data')
if data is not None:
if (util.pd and isinstance(data, util.pd.DataFrame) and
list(data.columns) != list(self.data.columns) and self._index):
data = data.reset_index()
self.verify(data)
... | Overrides update to concatenate streamed data up to defined length. |
def _set_autocommit(connection):
if hasattr(connection.connection, "autocommit"):
if callable(connection.connection.autocommit):
connection.connection.autocommit(True)
else:
connection.connection.autocommit = True
elif hasattr(connection.connection... | Make sure a connection is in autocommit mode. |
def string_to_datetime(self, obj):
if isinstance(obj, six.string_types) and len(obj) == 19:
try:
return datetime.strptime(obj, "%Y-%m-%dT%H:%M:%S")
except ValueError:
pass
if isinstance(obj, six.string_types) and len(obj) > 19:
try:
... | Decode a datetime string to a datetime object |
async def get_wallet_record(wallet_handle: int,
type_: str,
id: str,
options_json: str) -> str:
logger = logging.getLogger(__name__)
logger.debug("get_wallet_record: >>> wallet_handle: %r, type_: %r, id: %r, options_json: %r",
... | Get an wallet record by id
:param wallet_handle: wallet handler (created by open_wallet).
:param type_: allows to separate different record types collections
:param id: the id of record
:param options_json: //TODO: FIXME: Think about replacing by bitmask
{
retrieveType: (optional, false b... |
def _handle_exists(self, node, scope, ctxt, stream):
    """Handle the exists unary operator.

    Returns an Int field holding 1 when resolving ``node.expr``
    succeeds and 0 when it raises :class:`AttributeError`.
    """
    result = fields.Int()
    try:
        self._handle_node(node.expr, scope, ctxt, stream)
        result._pfp__set_value(1)
    except AttributeError:
        # The expression could not be resolved -> does not exist.
        result._pfp__set_value(0)
    return result
def query_echo(cls, request,
foo: (Ptypes.query, String('A query parameter'))) -> [
(200, 'Ok', String)]:
log.info('Echoing query param, value is: {}'.format(foo))
for i in range(randint(0, MAX_LOOP_DURATION)):
yield
msg = 'The value sent was: {}'.forma... | Echo the query parameter. |
def validate_args(args):
if not args.minutes and not args.start_time:
print("Error: missing --minutes or --start-time")
return False
if args.minutes and args.start_time:
print("Error: --minutes shouldn't be specified if --start-time is used")
return False
if args.end_time and... | Basic option validation. Returns False if the options are not valid,
True otherwise.
:param args: the command line options
:type args: map
:param brokers_num: the number of brokers |
def use_comparative_composition_view(self):
    """Pass through to provider CompositionLookupSession.use_comparative_composition_view"""
    self._object_views['composition'] = COMPARATIVE
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_composition_view()
        except AttributeError:
            # This provider session does not support the view; skip it.
            pass
def detect_terminal(_environ=os.environ):
if _environ.get('TMUX'):
return 'tmux'
elif subdict_by_key_prefix(_environ, 'BYOBU'):
return 'byobu'
elif _environ.get('TERM').startswith('screen'):
return _environ['TERM']
elif _environ.get('COLORTERM'):
return _environ['COLORTER... | Detect "terminal" you are using.
First, this function checks if you are in tmux, byobu, or screen.
If not it uses $COLORTERM [#]_ if defined and fallbacks to $TERM.
.. [#] So, if you are in Gnome Terminal you have "gnome-terminal"
instead of "xterm-color"". |
def get_agent(msg):
    """Handy hack to handle legacy messages where 'agent' was a list."""
    agent = msg['msg']['agent']
    # Legacy format wrapped the agent in a (single-element) list.
    return agent[0] if isinstance(agent, list) else agent
def getManagedObjects(self, objectPath):
d = {}
for p in sorted(self.exports.keys()):
if not p.startswith(objectPath) or p == objectPath:
continue
o = self.exports[p]
i = {}
d[p] = i
for iface in o.getInterfaces():
... | Returns a Python dictionary containing the reply content for
org.freedesktop.DBus.ObjectManager.GetManagedObjects |
def get_docstring(obj):
    """Extract the docstring from an object as individual lines.

    Parameters
    ----------
    obj : object
        The Python object (class, function or method) to extract docstrings
        from.

    Returns
    -------
    lines : `list` of `str`
        Individual docstring lines with common indentation removed.
    """
    docstring = getdoc(obj, allow_inherited=True)
    if docstring is None:
        # Warn about the undocumented object, then fall back to a stub.
        getLogger(__name__).warning("Object %s doesn't have a docstring.", obj)
        docstring = 'Undocumented'
    return prepare_docstring(docstring, ignore=1)
def _normalize_overlap(overlap, window, nfft, samp, method='welch'):
if method == 'bartlett':
return 0
if overlap is None and isinstance(window, string_types):
return recommended_overlap(window, nfft)
if overlap is None:
return 0
return seconds_to_samples(overlap, samp) | Normalise an overlap in physical units to a number of samples
Parameters
----------
overlap : `float`, `Quantity`, `None`
the overlap in some physical unit (seconds)
window : `str`
the name of the window function that will be used, only used
if `overlap=None` is given
nfft... |
def make_label(self, path):
from datetime import datetime
from StringIO import StringIO
path = path.lstrip("/")
bucket, label = path.split("/", 1)
bucket = self.ofs._require_bucket(bucket)
key = self.ofs._get_key(bucket, label)
if key is None:
key = bu... | this borrows too much from the internals of ofs
maybe expose different parts of the api? |
def setup_arrow_buttons(self):
vsb = self.scrollarea.verticalScrollBar()
style = vsb.style()
opt = QStyleOptionSlider()
vsb.initStyleOption(opt)
vsb_up_arrow = style.subControlRect(
QStyle.CC_ScrollBar, opt, QStyle.SC_ScrollBarAddLine, self)
up_btn = up_bt... | Setup the up and down arrow buttons that are placed at the top and
bottom of the scrollarea. |
def last_week_of_year(cls, year):
    """Return the last week of the given year.

    This week with either have week-number 52 or 53.
    This will be the same as Week(year+1, 0), but will even work for
    year 9999 where this expression would overflow; the maximum
    representable week (``cls.max``) is returned instead.
    """
    if year == cls.max.year:
        # Constructing Week(year+1, 0) would overflow past the maximum.
        return cls.max
    return cls(year + 1, 0)
def relative_humidity_from_dewpoint(temperature, dewpt):
    r"""Calculate the relative humidity.

    Uses temperature and dewpoint in celsius to calculate relative
    humidity using the ratio of vapor pressure to saturation vapor pressures.

    Parameters
    ----------
    temperature : `pint.Quantity`
        The temperature
    dewpt : `pint.Quantity`
        The dew point temperature

    Returns
    -------
    `pint.Quantity`
        The relative humidity (dimensionless ratio)
    """
    # The stray bare `r` statement that preceded this body (residue of a
    # stripped raw docstring) was a NameError at call time; it has been
    # restored as the r""" docstring prefix.
    e = saturation_vapor_pressure(dewpt)
    e_s = saturation_vapor_pressure(temperature)
    return (e / e_s)
def cleanup(self):
if self.process is None:
return
if self.process.poll() is None:
log.info("Sending TERM to %d", self.process.pid)
self.process.terminate()
start = time.clock()
while time.clock() - start < 1.0:
time.sleep(0.05)... | Clean up, making sure the process is stopped before we pack up and go home. |
def _stream_search(self, query):
for doc in self.solr.search(query, rows=100000000):
if self.unique_key != "_id":
doc["_id"] = doc.pop(self.unique_key)
yield doc | Helper method for iterating over Solr search results. |
def qsize(self):
    """Returns the number of items currently in the queue

    :return: Integer containing size of the queue
    :exception: ConnectionError if queue is not connected
    """
    if not self.connected:
        raise QueueNotConnectedError("Queue is not Connected")
    try:
        return self.__db.llen(self._key)
    except redis.ConnectionError as e:
        # Re-raise with the original error embedded for context.
        raise redis.ConnectionError(repr(e))
def p(i, sample_size, weights):
weight_i = weights[i]
weights_sum = sum(weights)
other_weights = list(weights)
del other_weights[i]
probability_of_i = 0
for picks in range(0, sample_size):
permutations = list(itertools.permutations(other_weights, picks))
... | Given a weighted set and sample size return the probabilty that the
weight `i` will be present in the sample.
Created to test the output of the `SomeOf` maker class. The math was
provided by Andy Blackshaw - thank you dad :) |
def _order_by_is_valid_or_none(self, params):
if not "order_by" in params or not params["order_by"]:
return True
def _order_by_dict_is_not_well_formed(d):
if not isinstance(d, dict):
return True
if "property_name" in d and d["property_name"]:
... | Validates that a given order_by has proper syntax.
:param params: Query params.
:return: Returns True if either no order_by is present, or if the order_by is well-formed. |
def after_websocket(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:
    """Add an after websocket function.

    This is designed to be used as a decorator. An example usage,

    .. code-block:: python

        @app.after_websocket
        def func(response):
            return response

    Arguments:
        func: The after websocket function itself.
        name: Optional blueprint key name.
    """
    # Store the coroutine-wrapped handler, but hand back the original
    # function so decorator chains keep working.
    self.after_websocket_funcs[name].append(ensure_coroutine(func))
    return func
def get_words(data):
    """Extracts the words from given string.

    Usage::

        >>> get_words("Users are: John Doe, Jane Doe, Z6PO.")
        [u'Users', u'are', u'John', u'Doe', u'Jane', u'Doe', u'Z6PO']

    :param data: Data to extract words from.
    :type data: unicode
    :return: Words.
    :rtype: list
    """
    extracted = re.findall(r"\w+", data)
    LOGGER.debug("> Words: '{0}'".format(", ".join(extracted)))
    return extracted
def cache_key_exist(self, key):
    """Returns if a key from cache exist

    Responds with JSON ``{'key_exist': <bool>}`` and HTTP 200 when the
    key is present in the cache, 404 otherwise.
    """
    # bool(...) replaces the non-idiomatic `True if ... else False`.
    key_exist = bool(cache.get(key))
    status = 200 if key_exist else 404
    return json_success(json.dumps({'key_exist': key_exist}),
                        status=status)
def cached_read(self, kind):
    """Cache stats calls to prevent hammering the API.

    Refreshes the cached entry when it is missing or older than
    ``self.cache_timeout`` seconds, then returns its last value.
    """
    if kind not in self.cache:
        self.pull_stats(kind)
    entry = self.cache[kind]
    age = self.epochnow() - entry['lastcall']
    if age > self.cache_timeout:
        self.pull_stats(kind)
    return self.cache[kind]['lastvalue']
async def post(self, url_path: str, params: dict = None, rtype: str = RESPONSE_JSON, schema: dict = None) -> Any:
if params is None:
params = dict()
client = API(self.endpoint.conn_handler(self.session, self.proxy))
response = await client.requests_post(url_path, **params)
if... | POST request on self.endpoint + url_path
:param url_path: Url encoded path following the endpoint
:param params: Url query string parameters dictionary
:param rtype: Response type
:param schema: Json Schema to validate response (optional, default None)
:return: |
def _prep_fields_param(fields):
store_samples = False
if fields is None:
return True, None
if isinstance(fields, str):
fields = [fields]
else:
fields = list(fields)
if 'samples' in fields:
fields.remove('samples')
store_samples = True
elif '*' in fields:
... | Prepare the `fields` parameter, and determine whether or not to store samples. |
def generous_parse_uri(uri):
parse_result = urlparse(uri)
if parse_result.scheme == '':
abspath = os.path.abspath(parse_result.path)
if IS_WINDOWS:
abspath = windows_to_unix_path(abspath)
fixed_uri = "file://{}".format(abspath)
parse_result = urlparse(fixed_uri)
r... | Return a urlparse.ParseResult object with the results of parsing the
given URI. This has the same properties as the result of parse_uri.
When passed a relative path, it determines the absolute path, sets the
scheme to file, the netloc to localhost and returns a parse of the result. |
def main(args=None):
try:
from psyplot_gui import get_parser as _get_parser
except ImportError:
logger.debug('Failed to import gui', exc_info=True)
parser = get_parser(create=False)
parser.update_arg('output', required=True)
parser.create_arguments()
parser.parse2... | Main function for usage of psyplot from the command line
This function creates a parser that parses command lines to the
:func:`make_plot` functions or (if the ``psyplot_gui`` module is
present, to the :func:`psyplot_gui.start_app` function)
Returns
-------
psyplot.parser.FuncArgParser
... |
def remove_user(self, group, username):
    """Remove a user from the specified LDAP group.

    Args:
        group: Name of group to update
        username: Username of user to remove

    Raises:
        ldap_tools.exceptions.InvalidResult:
            Results of the query were invalid.
    """
    try:
        # Verify the group resolves before attempting the modify.
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:
        # Re-raise without chaining the original exception context.
        raise err from None
    changes = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]}
    self.client.modify(self.__distinguished_name(group), changes)
def create_new_values(self):
model = self.queryset.model
pks = []
extra_create_kwargs = self.extra_create_kwargs()
for value in self._new_values:
create_kwargs = {self.create_field: value}
create_kwargs.update(extra_create_kwargs)
new_item = self.creat... | Create values created by the user input. Return the model instances QS. |
def get_temperature_from_humidity(self):
    """Returns the temperature in Celsius from the humidity sensor."""
    self._init_humidity()  # make sure the sensor is initialised
    reading = self._humidity.humidityRead()
    # reading[2] appears to be a temperature-valid flag and reading[3]
    # the value -- confirm against the sensor driver docs.
    if reading[2]:
        return reading[3]
    return 0
def count_base_units(units):
    """Returns a dict mapping names of base units to how many times they
    appear in the given iterable of units. Effectively this counts how
    many length units you have, how many time units, and so forth.
    """
    counts = {}
    for unit in units:
        _factor, base_unit = get_conversion_factor(unit)
        counts[base_unit] = counts.get(base_unit, 0) + 1
    return counts
def read(self, file_path):
    """Read a zoneinfo structure from the given path.

    :param file_path: The path of a zoneinfo file.
    :raises InvalidZoneinfoFile: if the path does not exist.
    """
    if not os.path.exists(file_path):
        raise InvalidZoneinfoFile("The tzinfo file does not exist")
    with open(file_path, "rb") as handle:
        return self._parse(handle)
def install_hooks():
if PY3:
return
install_aliases()
flog.debug('sys.meta_path was: {0}'.format(sys.meta_path))
flog.debug('Installing hooks ...')
newhook = RenameImport(RENAMES)
if not detect_hooks():
sys.meta_path.append(newhook)
flog.debug('sys.meta_path is now: {0}'.form... | This function installs the future.standard_library import hook into
sys.meta_path. |
def generate(self, output_dir, work, ngrams, labels, minus_ngrams):
template = self._get_template()
colours = generate_colours(len(ngrams))
for siglum in self._corpus.get_sigla(work):
ngram_data = zip(labels, ngrams)
content = self._generate_base(work, siglum)
... | Generates HTML reports for each witness to `work`, showing its text
with the n-grams in `ngrams` highlighted.
Any n-grams in `minus_ngrams` have any highlighting of them
(or subsets of them) removed.
:param output_dir: directory to write report to
:type output_dir: `str`
... |
def is_archlinux():
    """Return True if the current system is Arch Linux.

    Detection: a Linux system for which ``platform.linux_distribution()``
    reports nothing and ``/etc/arch-release`` exists.  (The original
    docstring incorrectly described this as a Debian-like check.)
    """
    if platform.system().lower() != 'linux':
        return False
    # platform.linux_distribution() was removed in Python 3.8; treat its
    # absence the same as an empty result so the check keeps working.
    dist_fn = getattr(platform, 'linux_distribution', None)
    distribution = dist_fn() if dist_fn is not None else ('', '', '')
    if distribution != ('', '', ''):
        return False
    return os.path.exists('/etc/arch-release')
def _less_or_close(a, value, **kwargs):
r
return (a < value) | np.isclose(a, value, **kwargs) | r"""Compare values for less or close to boolean masks.
Returns a boolean mask for values less than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
... |
def get_parameter_definitions(self):
output = {}
for var_name, attrs in self.defined_variables().items():
var_type = attrs.get("type")
if isinstance(var_type, CFNType):
cfn_attrs = copy.deepcopy(attrs)
cfn_attrs["type"] = var_type.parameter_type
... | Get the parameter definitions to submit to CloudFormation.
Any variable definition whose `type` is an instance of `CFNType` will
be returned as a CloudFormation Parameter.
Returns:
dict: parameter definitions. Keys are parameter names, the values
are dicts containin... |
def create_worker_build(self, **kwargs):
missing = set()
for required in ('platform', 'release', 'arrangement_version'):
if not kwargs.get(required):
missing.add(required)
if missing:
raise ValueError("Worker build missing required parameters: %s" %
... | Create a worker build
Pass through method to create_prod_build with the following
modifications:
- platform param is required
- release param is required
- arrangement_version param is required, which is used to
select which worker_inner:n.json template... |
def calculate_md5(fileobject, size=2**16):
fileobject.seek(0)
md5 = hashlib.md5()
for data in iter(lambda: fileobject.read(size), b''):
if not data: break
if isinstance(data, six.text_type):
data = data.encode('utf-8')
md5.update(data)
fileobject.seek(0)
return md... | Utility function to calculate md5 hashes while being light on memory usage.
By reading the fileobject piece by piece, we are able to process content that
is larger than available memory |
def write(self, path):
    """Write RSS content to file.

    :param path: destination file path; content is written as bytes.
    """
    with open(path, 'wb') as outfile:
        outfile.write(self.getXML())
def namedb_get_all_namespace_ids( cur ):
    """Get a list of all READY namespace IDs."""
    query = "SELECT namespace_id FROM namespaces WHERE op = ?;"
    args = (NAMESPACE_READY,)
    rows = namedb_query_execute( cur, query, args )
    # Collect just the namespace_id column from each returned row.
    return [row['namespace_id'] for row in rows]
def equities_sids_for_country_code(self, country_code):
    """Return all of the sids for a given country.

    Parameters
    ----------
    country_code : str
        An ISO 3166 alpha-2 country code.

    Returns
    -------
    tuple[int]
        The sids whose exchanges are in this country.
    """
    lifetimes = self._compute_asset_lifetimes([country_code])
    return tuple(lifetimes.sid.tolist())
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
if isinstance(action, argparse._SubParsersAction):
for subaction in sorted(
... | Sort the subcommands alphabetically |
async def prepare_decrypter(client, cdn_client, cdn_redirect):
cdn_aes = AESModeCTR(
key=cdn_redirect.encryption_key,
iv=cdn_redirect.encryption_iv[:12] + bytes(4)
)
decrypter = CdnDecrypter(
cdn_client, cdn_redirect.file_token,
cdn_aes, cdn_redire... | Prepares a new CDN decrypter.
:param client: a TelegramClient connected to the main servers.
:param cdn_client: a new client connected to the CDN.
:param cdn_redirect: the redirect file object that caused this call.
:return: (CdnDecrypter, first chunk file data) |
def _get_cached_mounted_points():
result = []
try:
mounted_devices_key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE, "SYSTEM\\MountedDevices"
)
for v in _iter_vals(mounted_devices_key):
if "DosDevices" not in v[0]:
continue
volume_string... | ! Get the volumes present on the system
@return List of mount points and their associated target id
Ex. [{ 'mount_point': 'D:', 'target_id_usb_id': 'xxxx'}, ...] |
def devices(self):
    """return generator of configured devices

    (In practice a list: one :class:`JFSDevice` per ``self.fs`` child,
    or an empty list when no filesystem is attached.)
    """
    if self.fs is None:
        return []
    return [JFSDevice(d, self, parentpath=self.rootpath)
            for d in self.fs.devices.iterchildren()]
def normalize_index(index):
    """normalize numpy index

    Converts the input to an ndarray.  Boolean masks become the integer
    positions of their True entries; integer indices of any width pass
    through unchanged; anything else raises ValueError.  An empty input
    is returned as an empty int array.
    """
    index = np.asarray(index)
    if len(index) == 0:
        return index.astype('int')
    if index.dtype == bool:
        index = index.nonzero()[0]
    elif np.issubdtype(index.dtype, np.integer):
        # Accept any integer width (int32, int64, ...), not only the
        # platform-default dtype the original `== 'int'` compared against.
        pass
    else:
        raise ValueError('Index should be either integer or bool')
    return index
def user(self):
    """Returns the current user set by current context.

    Falls back to an empty dict when the context names no user or the
    user is unknown.
    """
    context = self.contexts[self.current_context]
    username = context.get("user", "")
    return self.users.get(username, {})
def add_ip_address(list_name, item_name):
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "add_policy_ip_addresses",
"params": [list_name, {"item_name": item_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
return _validate_change_result(resp... | Add an IP address to an IP address list.
list_name(str): The name of the specific policy IP address list to append to.
item_name(str): The IP address to append to the list.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_ip_address MyIPAddressList 10.0.0.0/24 |
def list_tag(self, limit=500, offset=0):
    """List `all` the tags for this Thing.

    Returns a list of tag strings, e.g.::

        ["mytag1", "mytag2", "ein_name", "nochein_name"]

    Raises an IOT exception if the request fails.
    """
    request_evt = self._client._request_entity_tag_list(
        self.__lid, limit=limit, offset=offset)
    self._client._wait_and_except_if_failed(request_evt)
    return request_evt.payload['tags']
def report(self, name, ok, msg=None, deltat=20):
r = self.reports[name]
if time.time() < r.last_report + deltat:
r.ok = ok
return
r.last_report = time.time()
if ok and not r.ok:
self.say("%s OK" % name)
r.ok = ok
if not r.ok:
... | report a sensor error |
def drop_index(self, table, column):
    """Drop an index from a table."""
    statement = 'ALTER TABLE {0} DROP INDEX {1}'.format(wrap(table), column)
    self.execute(statement)
    self._printer('\tDropped index from column {0}'.format(column))
def get_gene_id(gene_name):
from intermine.webservice import Service
service = Service('http://yeastmine.yeastgenome.org/yeastmine/service')
query = service.new_query('Gene')
query.add_view('primaryIdentifier', 'secondaryIdentifier', 'symbol',
'name', 'sgdAlias', 'crossReferences.iden... | Retrieve systematic yeast gene name from the common name.
:param gene_name: Common name for yeast gene (e.g. ADE2).
:type gene_name: str
:returns: Systematic name for yeast gene (e.g. YOR128C).
:rtype: str |
def _recode_for_categories(codes, old_categories, new_categories):
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
return codes.copy()
elif new_categories.equals(old_categories):
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_cat... | Convert a set of codes for to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = n... |
def from_record(cls, record, crs, schema=None):
properties = cls._to_properties(record, schema)
vector = GeoVector(shape(record['geometry']), crs)
if record.get('raster'):
assets = {k: dict(type=RASTER_TYPE, product='visual', **v) for k, v in record.get('raster').items()}
els... | Create GeoFeature from a record. |
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
    """Produce a fake list of daily files spanning one year.

    Generates one ``YYYY-MM-DD.nofile`` name per day from 2017-12-01 to
    2018-12-01 (no files actually exist on disk) and returns them as a
    Series indexed by date.
    """
    index = pds.date_range(pysat.datetime(2017, 12, 1),
                           pysat.datetime(2018, 12, 1))
    file_names = [data_path + day.strftime('%Y-%m-%d') + '.nofile'
                  for day in index]
    return pysat.Series(file_names, index=index)
def upload(self, sys_id, file_path, name=None, multipart=False):
if not isinstance(multipart, bool):
raise InvalidUsage('Multipart must be of type bool')
resource = self.resource
if name is None:
name = os.path.basename(file_path)
resource.parameters.add_custom({
... | Attaches a new file to the provided record
:param sys_id: the sys_id of the record to attach the file to
:param file_path: local absolute path of the file to upload
:param name: custom name for the uploaded file (instead of basename)
:param multipart: whether or not to use multipart
... |
def get_raw(self):
    """Return file information as ``[name, size, last_modified, location]``."""
    return [getattr(self, attr)
            for attr in ('name', 'size', 'last_modified', 'location')]
The returned list contains name, size, last_modified and location. |
def unregister_dependent_on(self, tree):
    """Remove ``tree`` from our dependencies; no-op when not registered."""
    if tree not in self.dependent_on:
        return
    self.dependent_on.remove(tree)
def _resolved_pid(self):
    """Return ``self.pid``, resolving it first when it is a fetched pid."""
    pid = self.pid
    if isinstance(pid, PersistentIdentifier):
        return pid
    return resolve_pid(pid)
def _fingerprint_dict_with_files(self, option_val):
    """Fingerprint a dict whose values may be file paths.

    Each value that names an existing file is replaced by a digest of that
    file's contents before the stable option fingerprint is computed, so
    the fingerprint tracks file contents rather than paths.
    """
    expanded = {key: self._expand_possible_file_value(value)
                for key, value in option_val.items()}
    return stable_option_fingerprint(expanded)
}) | Returns a fingerprint of the given dictionary containing file paths.
Any value which is a file path which exists on disk will be fingerprinted by that file's
contents rather than by its path.
This assumes the files are small enough to be read into memory.
NB: The keys of the dict are assumed to be st... |
def unregister(self, plugin=None, name=None):
if name is None:
assert plugin is not None, "one of name or plugin needs to be specified"
name = self.get_name(plugin)
if plugin is None:
plugin = self.get_plugin(name)
if self._name2plugin.get(name):
d... | unregister a plugin object and all its contained hook implementations
from internal data structures. |
def device_message(device,
code,
ts=None,
origin=None,
type=None,
severity=None,
title=None,
description=None,
hint=None,
**metaData):
if ts is N... | This quickly builds a time-stamped message. If `ts` is None, the
current time is used. |
def CreateAFF4Object(stat_response, client_id_urn, mutation_pool, token=None):
urn = stat_response.pathspec.AFF4Path(client_id_urn)
if stat.S_ISDIR(stat_response.st_mode):
ftype = standard.VFSDirectory
else:
ftype = aff4_grr.VFSFile
with aff4.FACTORY.Create(
urn, ftype, mode="w", mutation_pool=mut... | This creates a File or a Directory from a stat response. |
def addHeader(self, name, value, must_understand=False):
    """Set a persistent header to send with each request.

    @param name: Header name.
    @param value: Header value.
    @param must_understand: Whether the receiver is required to understand
        this header.
    """
    headers = self.headers
    headers[name] = value
    headers.set_required(name, must_understand)
@param name: Header name. |
def get_facet_serializer_class(self):
if self.facet_serializer_class is None:
raise AttributeError(
"%(cls)s should either include a `facet_serializer_class` attribute, "
"or override %(cls)s.get_facet_serializer_class() method." %
{"cls": self.__class... | Return the class to use for serializing facets.
Defaults to using ``self.facet_serializer_class``. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.