docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Check if a given date is a holiday.
Args:
date (date, datetime or str): Date to be checked.
Returns:
bool: True if the date is a holiday, False otherwise. | def isholiday(self, date):
date = parsefun(date)
if self.holidays:
# i is the index of first holiday >= date
i = bisect.bisect_left(self.holidays, date)
if i == 0 and date < self.holidays[0]:
warn('Holiday list exhausted at start, ' \
... | 818,995 |
Adjust date to last day of the month, regardless of work days.
Args:
date (date, datetime or str): Date to be adjusted.
Returns:
datetime: Adjusted date. | def caleom(date):
date = parsefun(date)
date += datetime.timedelta(days=32-date.day)
date -= datetime.timedelta(days=date.day)
return date | 819,002 |
Generate business days between two dates, taking holidays into
consideration.
Args:
date1 (date, datetime or str): Date start of interval.
date2 (date, datetime or str): Date end of interval, not included.
Note:
All business days between date1 (inc) ... | def range(self, date1, date2):
date1 = self.adjust(parsefun(date1), FOLLOWING)
date2 = parsefun(date2)
holidays = []
holidx = 0
if len(self.holidays):
index1 = bisect.bisect_left(self.holidays, date1)
index2 = bisect.bisect_left(self.hol... | 819,003 |
Set command line arguments as a source
Parses the command line arguments described by the parameters.
Args:
name: the long name of the argument (foo)
short_name: the optional short name of the argument (f)
type: the optional type of the argument, defaults to bool
... | def argv(cls, name, short_name=None, type=None, help=None):
cls.__hierarchy.append(argv.Argv(name, short_name, type, help)) | 819,061 |
Generate an identifier for a callable signal receiver.
This is used when disconnecting receivers, where we need to correctly
establish equivalence between the input receiver and the receivers assigned
to a signal.
Args:
receiver: A callable object.
Returns:
An identifier for the r... | def __make_id(receiver):
if __is_bound_method(receiver):
return (id(receiver.__func__), id(receiver.__self__))
return id(receiver) | 819,305 |
Return all signal handlers that are currently still alive for the
input `signal`.
Args:
signal: A signal name.
Returns:
A list of callable receivers for the input signal. | def __live_receivers(signal):
with __lock:
__purge()
receivers = [funcref() for funcref in __receivers[signal]]
return receivers | 819,307 |
Return ``True`` if the `method` is a bound method (attached to an class
instance.
Args:
method: A method or function type object. | def __is_bound_method(method):
if not(hasattr(method, "__func__") and hasattr(method, "__self__")):
return False
# Bound methods have a __self__ attribute pointing to the owner instance
return six.get_method_self(method) is not None | 819,308 |
Register `receiver` method/function as a receiver for the `signal`.
When the signal is emitted, this receiver will be invoked along with
all other associated signals.
Args:
signal: A signal identifier (e.g., a signal name)
receiver: A callable object to connect to the signal. | def connect(signal, receiver):
__check_receiver(receiver)
if __is_bound_method(receiver):
ref = WeakMethod
else:
ref = weakref.ref
with __lock:
__purge()
__receivers[signal].append(ref(receiver)) | 819,309 |
Disconnect the receiver `func` from the signal, identified by
`signal_id`.
Args:
signal: The signal identifier.
receiver: The callable receiver to disconnect.
Returns:
True if the receiver was successfully disconnected. False otherwise. | def disconnect(signal, receiver):
inputkey = __make_id(receiver)
with __lock:
__purge()
receivers = __receivers.get(signal)
for idx in six.moves.range(len(receivers)):
connected = receivers[idx]()
if inputkey != __make_id(connected):
contin... | 819,310 |
Emit a signal by serially calling each registered signal receiver for
the `signal`.
Note:
The receiver must accept the *args and/or **kwargs that have been
passed to it. There expected parameters are not dictated by
mixbox.
Args:
signal: A signal identifier or name.... | def emit(signal, *args, **kwargs):
if signal not in __receivers:
return
receivers = __live_receivers(signal)
for func in receivers:
func(*args, **kwargs) | 819,311 |
Unset the TypedFields on the input `entity`.
Args:
entity: A mixbox.Entity object.
*types: A variable-length list of TypedField subclasses. If not
provided, defaults to TypedField. | def unset(entity, *types):
if not types:
types = (TypedField,)
fields = list(entity._fields.keys())
remove = (x for x in fields if isinstance(x, types))
for field in remove:
del entity._fields[field] | 819,937 |
Return True if the input TypedField `field` contains instance attributes
that match the input parameters.
Args:
field: A TypedField instance.
params: A dictionary of TypedField instance attribute-to-value mappings.
Returns:
True if the input TypedField matches the input parameters. | def _matches(field, params):
fieldattrs = six.iteritems(params)
return all(getattr(field, attr) == val for attr, val in fieldattrs) | 819,938 |
Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples. | def iterfields(klass):
is_field = lambda x: isinstance(x, TypedField)
for name, field in inspect.getmembers(klass, predicate=is_field):
yield name, field | 819,939 |
Return all TypedFields found on the input `Entity` that were initialized
with the input **kwargs.
Example:
>>> find(myentity, multiple=True, type_=Foo)
Note:
TypedFields.__init__() can accept a string or a class as a type_
argument, but this method expects a class.
Args:
... | def find(entity, **kwargs):
try:
typedfields = entity.typed_fields()
except AttributeError:
typedfields = iterfields(entity.__class__)
matching = [x for x in typedfields if _matches(x, kwargs)]
return matching | 819,940 |
Return the TypedField value for the input `instance` and `owner`.
If the TypedField is a "multiple" field and hasn't been set yet,
set the field to an empty list and return it.
Args:
instance: An instance of the `owner` class that this TypedField
belongs to..
... | def __get__(self, instance, owner=None):
if instance is None:
return self
elif self in instance._fields:
return instance._fields[self]
elif self.multiple:
return instance._fields.setdefault(self, self._listfunc())
else:
return None | 819,942 |
Get the namespace the given prefix maps to.
Args:
prefix (str): The prefix
Returns:
str: The namespace, or None if the prefix isn't mapped to
anything in this set. | def namespace_for_prefix(self, prefix):
try:
ni = self.__lookup_prefix(prefix)
except PrefixNotFoundError:
return None
else:
return ni.uri | 820,294 |
Return a subset of this NamespaceSet containing only data for the
given namespaces.
Args:
ns_uris (iterable): An iterable of namespace URIs which select the
namespaces for the subset.
Returns:
The subset
Raises:
NamespaceNotFoundErro... | def subset(self, ns_uris):
sub_ns = NamespaceSet()
for ns_uri in ns_uris:
ni = self.__lookup_uri(ns_uri)
new_ni = copy.deepcopy(ni)
# We should be able to reach into details of our own
# implementation on another obj, right?? This makes the sub... | 820,308 |
Return the version of the root element passed in.
Args:
root (etree.Element)
Returns:
distutils.StrictVersion
Raises:
UnknownVersionError | def _get_version(self, root):
# Note: STIX and MAEC use a "version" attribute. To support CybOX, a
# subclass will need to combine "cybox_major_version",
# "cybox_minor_version", and "cybox_update_version".
version = self.get_version(root)
if version:
return ... | 820,315 |
Ensure the root element is a supported version.
Args:
root (etree.Element)
Raises:
UnsupportedVersionError | def _check_version(self, root):
version = self._get_version(root)
supported = [StrictVersion(x) for x in
self.supported_versions(root.tag)]
if version in supported:
return
error = "Document version ({0}) not in supported versions ({1})"
... | 820,316 |
Check that the XML element tree has a supported root element.
Args:
root (etree.Element)
Raises:
UnsupportedRootElementError | def _check_root_tag(self, root):
supported = self.supported_tags()
if root.tag in supported:
return
error = "Document root element ({0}) not one of ({1})"
raise UnsupportedRootElementError(
message=error.format(root.tag, supported),
expected=... | 820,317 |
Attempts to parse `value` into an instance of ``datetime.datetime``. If
`value` is ``None``, this function will return ``None``.
Args:
value: A timestamp. This can be a string or datetime.datetime value. | def parse_datetime(value):
if not value:
return None
elif isinstance(value, datetime.datetime):
return value
return dateutil.parser.parse(value) | 820,347 |
Attempts to parse `value` into an instance of ``datetime.date``. If
`value` is ``None``, this function will return ``None``.
Args:
value: A timestamp. This can be a string, datetime.date, or
datetime.datetime value. | def parse_date(value):
if not value:
return None
if isinstance(value, datetime.date):
return value
return parse_datetime(value).date() | 820,348 |
Attempts to convert `value` into an ``xs:date`` string. If `value` is
``None``, ``None`` will be returned.
Args:
value: A date value. This can be a string, datetime.date, or
datetime.datetime object.
Returns:
An ``xs:date`` formatted timestamp string. | def serialize_date(value):
if not value:
return None
elif isinstance(value, datetime.datetime):
return value.date().isoformat()
elif isinstance(value, datetime.date):
return value.isoformat()
else:
return parse_date(value).isoformat() | 820,349 |
Create an instance of the class associated with the `key` (xsi:type)
and initialize it with the *args and **kwargs.
Args:
key: A class lookup key (see entity_class()).
Returns:
An instance of the class associated with the `key`. | def instance(cls, key, *args, **kwargs):
klass = cls.entity_class(key)
return klass(*args, **kwargs) | 820,513 |
Parse the dictionary and return an Entity instance.
This will attempt to extract type information from the input
dictionary and pass it to entity_class to resolve the correct class
for the type.
Args:
cls_dict: A dictionary representation of an Entity object.
fa... | def from_dict(cls, cls_dict, fallback_xsi_type=None):
if not cls_dict:
return None
if isinstance(cls_dict, six.string_types):
if not getattr(cls, "_convert_strings", False):
return cls_dict
try:
typekey = cls.dictkey(cls_dict... | 820,514 |
Parse the generateDS object and return an Entity instance.
This will attempt to extract type information from the input
object and pass it to entity_class to resolve the correct class
for the type.
Args:
cls_obj: A generateDS object.
Returns:
An Entity ... | def from_obj(cls, cls_obj):
if not cls_obj:
return None
typekey = cls.objkey(cls_obj)
klass = cls.entity_class(typekey)
return klass.from_obj(cls_obj) | 820,515 |
Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
AttributeError: IF the... | def import_class(classpath):
modname, classname = classpath.rsplit(".", 1)
module = importlib.import_module(modname)
klass = getattr(module, classname)
return klass | 820,597 |
Attempt to return a Python class for the input class reference.
If `classref` is a class or None, return it. If `classref` is a
python classpath (e.g., "foo.bar.MyClass") import the class and return
it.
Args:
classref: A fully-qualified Python path to class, or a Python class.
Returns:
... | def resolve_class(classref):
if classref is None:
return None
elif isinstance(classref, six.class_types):
return classref
elif isinstance(classref, six.string_types):
return import_class(classref)
else:
raise ValueError("Unable to resolve class for '%s'" % classref) | 820,598 |
Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call. | def needkwargs(*argnames):
required = set(argnames)
def decorator(func):
def inner(*args, **kwargs):
missing = required - set(kwargs)
if missing:
err = "%s kwargs are missing." % list(missing)
raise ValueError(err)
return func(*ar... | 820,599 |
Removes all CDATA blocks from `text` if it contains them.
Note:
If the function contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifier... | def strip_cdata(text):
if not is_cdata(text):
return text
xml = "<e>{0}</e>".format(text)
node = etree.fromstring(xml)
return node.text | 820,706 |
Wraps the input `text` in a ``<![CDATA[ ]]>`` block.
If the text contains CDATA sections already, they are stripped and replaced
by the application of an outer-most CDATA block.
Args:
text: A string to wrap in a CDATA block.
Returns:
The `text` value wrapped in ``<![CDATA[]]>`` | def cdata(text):
if not text:
return text
if is_cdata(text):
text = strip_cdata(text)
escaped = "{0}{1}{2}".format(CDATA_START, text, CDATA_END)
return escaped | 820,707 |
Return True if the input value is valid for insertion into the
inner list.
Args:
value: An object about to be inserted. | def _is_valid(self, value):
# Entities have an istypeof method that can perform more sophisticated
# type checking.
if hasattr(self._type, "istypeof"):
return self._type.istypeof(value)
else:
return isinstance(value, self._type) | 820,709 |
Attempt to set the value at position `key` to the `value`.
If a value is not the correct type, an attempt will be made to
convert it to the correct type.
Args:
key: An index.
value: A value to set. | def __setitem__(self, key, value):
if not self._is_valid(value):
value = self._fix_value(value)
self._inner.__setitem__(key, value) | 820,711 |
Get file from WeedFS.
Returns file content. May be problematic for large files as content is
stored in memory.
Args:
**fid**: File identifier <volume_id>,<file_name_hash>
Returns:
Content of the file with provided fid or None if file doesn't
exist o... | def get_file(self, fid):
url = self.get_file_url(fid)
return self.conn.get_raw_data(url) | 823,433 |
Gets size of uploaded file
Or None if file doesn't exist.
Args:
**fid**: File identifier <volume_id>,<file_name_hash>
Returns:
Int or None | def get_file_size(self, fid):
url = self.get_file_url(fid)
res = self.conn.head(url)
if res is not None:
size = res.headers.get("content-length", None)
if size is not None:
return int(size)
return None | 823,436 |
Checks if file with provided fid exists
Args:
**fid**: File identifier <volume_id>,<file_name_hash>
Returns:
True if file exists. False if not. | def file_exists(self, fid):
res = self.get_file_size(fid)
if res is not None:
return True
return False | 823,437 |
Prepare headers for http communication.
Return dict of header to be used in requests.
Args:
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
Headers dict. Key and values are s... | def _prepare_headers(self, additional_headers=None, **kwargs):
user_agent = "pyseaweed/{version}".format(version=__version__)
headers = {"User-Agent": user_agent}
if additional_headers is not None:
headers.update(additional_headers)
return headers | 823,572 |
Gets data from url as text
Returns content under the provided url as text
Args:
**url**: address of the wanted data
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
... | def get_data(self, url, *args, **kwargs):
res = self._conn.get(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200:
return res.text
else:
return None | 823,574 |
Gets data from url as bytes
Returns content under the provided url as bytes
ie. for binary data
Args:
**url**: address of the wanted data
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with reque... | def get_raw_data(self, url, *args, **kwargs):
res = self._conn.get(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200:
return res.content
else:
return None | 823,575 |
Uploads file to provided url.
Returns contents as text
Args:
**url**: address where to upload file
**filename**: Name of the uploaded file
**file_stream**: file like object to upload
.. versionadded:: 0.3.2
**additional_headers**: (opt... | def post_file(self, url, filename, file_stream, *args, **kwargs):
res = self._conn.post(url, files={filename: file_stream},
headers=self._prepare_headers(**kwargs))
if res.status_code == 200 or res.status_code == 201:
return res.text
else:
... | 823,576 |
Deletes data under provided url
Returns status as boolean.
Args:
**url**: address of file to be deleted
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
Boolean. ... | def delete_data(self, url, *args, **kwargs):
res = self._conn.delete(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200 or res.status_code == 202:
return True
else:
return False | 823,577 |
Custom compare function. Returns ``1`` if the first ``Piper`` instance
is upstream of the second ``Piper`` instance, ``-1`` if the first
``Piper`` is downstream of the second ``Piper`` and ``0`` if the two
``Pipers`` are independent.
Arguments:
- piper1(``P... | def children_after_parents(self, piper1, piper2):
if piper1 in self[piper2].deep_nodes():
return 1
elif piper2 in self[piper1].deep_nodes():
return - 1
else:
return 0 | 824,136 |
Removes a ``Piper`` from the ``Dagger`` instance.
Arguments:
- piper(``Piper`` or id(``Piper``)) ``Piper`` instance or ``Piper``
instance id.
- forced(bool) [default: ``False``] If "forced" is ``True``, will not
raise a ``DaggerError`` if the ``Piper`` ha... | def del_piper(self, piper, forced=False):
self.log.debug('%s trying to delete piper %s' % \
(repr(self), repr(piper)))
try:
piper = self.resolve(piper, forgive=False)
except DaggerError:
self.log.error('%s cannot resolve piper from %s' % \
... | 824,146 |
Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the
specified order. Takes optional arguments for ``Dagger.add_piper``.
Arguments:
- pipers(sequence of valid ``add_piper`` arguments) Sequence of
``Pipers`` or valid ``Dagger.add_piper`` arguments to ... | def add_pipers(self, pipers, *args, **kwargs):
for piper in pipers:
self.add_piper(piper, *args, **kwargs) | 824,149 |
Deletes a sequence of ``Pipers`` instances from the ``Dagger`` in the
reverse of the specified order. Takes optional arguments for
``Dagger.del_piper``.
Arguments:
- pipers (sequence of valid ``del_piper`` arguments) Sequence of
``Pipers`` or valid ``Dag... | def del_pipers(self, pipers, *args, **kwargs):
pipers.reverse()
for piper in pipers:
self.del_piper(piper, *args, **kwargs) | 824,150 |
Adds a sequence of pipes to the ``Dagger`` in the specified order.
Takes optional arguments for ``Dagger.add_pipe``.
Arguments:
- pipes(sequence of valid ``add_pipe`` arguments) Sequence of pipes
or other valid ``Dagger.add_pipe`` arguments to be added to the
... | def add_pipes(self, pipes, *args, **kwargs):
for pipe in pipes:
self.add_pipe(pipe, *args, **kwargs) | 824,151 |
Deletes a sequence of pipes from the ``Dagger`` in the specified order.
Takes optional arguments for ``Dagger.del_pipe``.
Arguments:
- pipes(sequence of valid ``del_pipe`` arguments) Sequence of pipes or
other valid ``Dagger.del_pipe`` arguments to be removed fr... | def del_pipes(self, pipes, *args, **kwargs):
for pipe in pipes:
self.del_pipe(pipe * args, **kwargs) | 824,152 |
Saves pipeline as a Python source code file.
Arguments:
- filename(``path``) Path to save the pipeline source code. | def save(self, filename):
handle = open(filename, 'wb')
handle.write(P_LAY % self._code())
handle.close() | 824,157 |
Instanciates (loads) pipeline from a source code file.
Arguments:
- filename(``path``) location of the pipeline source code. | def load(self, filename):
dir_name = os.path.dirname(filename)
mod_name = os.path.basename(filename).split('.')[0]
self.filename = mod_name
sys.path.insert(0, dir_name)
mod = __import__(mod_name)
sys.path.remove(dir_name) # do not pollute the path.
pipers... | 824,158 |
Waits (blocks) until a running pipeline finishes.
Arguments:
- timeout(``int``) [default: ``None``] Specifies the timeout,
``RuntimeError`` will be raised. The default is to wait indefinetely
for the pipeline to finish. | def wait(self, timeout=None):
if self._started.isSet() and \
self._running.isSet() and \
not self._pausing.isSet():
self._finished.wait(timeout)
else:
raise PlumberError | 824,161 |
Connects the ``Piper`` instance to its upstream ``Pipers`` that should
be given as a sequence. This connects this ``Piper.inbox`` with the
upstream ``Piper.outbox`` respecting any "consume", "spawn" and
"produce" arguments.
Arguments:
- inbox(sequence) seque... | def connect(self, inbox):
if self.started:
self.log.error('Piper %s is started and cannot connect to %s.' % \
(self, inbox))
raise PiperError('Piper %s is started and cannot connect to %s.' % \
(self, inbox))
elif s... | 824,166 |
Disconnects the ``Piper`` instance from its upstream ``Pipers`` or
input data if the ``Piper`` is the input node of a pipeline.
Arguments:
- forced(``bool``) [default: ``False``] If ``True`` the ``Piper`` will
try to forcefully remove all tasks (including the spa... | def disconnect(self, forced=False):
if not self.connected:
self.log.error('Piper %s is not connected and cannot be disconnected' % self)
raise PiperError('Piper %s is not connected and cannot be disconnected' % self)
elif self.started:
self.log.error('Piper %... | 824,168 |
Removes a previously added **task** from the ``NuMap`` instance.
Arguments:
- number (``int`` or ``True``) A positive integer specifying the
number of **tasks** to pop. If number is set ``True`` all **tasks**
will be popped. | def pop_task(self, number):
if not self._started.isSet():
if number is True:
self._tasks = []
self._tasks_tracked = {}
elif number > 0:
last_task_id = len(self._tasks) - 1
for i in xrange(number):
... | 824,315 |
Write language-specific script template to file.
Arguments:
- fn(``string``) path to save the template to
- lang('python', 'bash') which programming language | def write_template(fn, lang="python"):
with open(fn, "wb") as fh:
if lang == "python":
fh.write(PY_TEMPLATE)
elif lang == "bash":
fh.write(SH_TEMPLATE) | 824,354 |
Execute arbitrary scripts.
Arguments:
- cfg(``dict``) script configuartion dictionary | def script(inbox, cfg):
script_name = cfg["id"]
script_id = str(abs(hash((cfg["id"],) + tuple(inbox[0].values()))))[0:8]
# LOG.log(mp.DEFAULT, "@papy;script %s:%s started" % (script_name, script_id))
# LOG.log(mp.SUBDEFAULT, "@papy;%s:%s received: %s" % (script_name, script_id, inbox))
args = {... | 824,355 |
G square test for a binary data.
Args:
dm: the data matrix to be used (as a numpy.ndarray).
x: the first node (as an integer).
y: the second node (as an integer).
s: the set of neibouring nodes of x and y (as a set()).
Returns:
p_val: the p-value of conditional independ... | def g_square_bin(dm, x, y, s):
def _calculate_tlog(x, y, s, dof, dm):
nijk = np.zeros((2, 2, dof))
s_size = len(s)
z = []
for z_index in range(s_size):
z.append(s.pop())
pass
for row_index in range(0, dm.shape[0]):
i = dm[row_index, x... | 824,363 |
Removes a **node object** from the ``DictGraph``. Returns ``True`` if a
**node object** has been removed. If the **node object** is not in the
``DictGraph`` raises a ``KeyError``.
Arguments:
- node(``object``) **node object** to be removed. Any hashable Python
... | def del_node(self, node):
for node_ in self.values():
if node in node_:
node_.pop(node)
return bool(self.pop(node)) | 824,723 |
Adds an edge to the ``DictGraph``. An edge is just a pair of **node
objects**. If the **node objects** are not in the graph they are
created.
Arguments:
- edge(iterable) An ordered pair of **node objects**. The edge is
assumed to have a direction from t... | def add_edge(self, edge, double=False):
(left_entity, right_entity) = edge
self.add_node(left_entity)
self.add_node(right_entity)
self[left_entity].update({right_entity:self[right_entity]})
if double:
self.add_edge((edge[1], edge[0])) | 824,724 |
Removes an edge from the ``DictGraph``. An edge is a pair of **node
objects**. The **node objects** are not removed from the ``DictGraph``.
Arguments:
- edge(``tuple``) An ordered pair of **node objects**. The edge is
assumed to have a direction from the first t... | def del_edge(self, edge, double=False):
(left_entity, right_entity) = edge
self[left_entity].pop(right_entity)
if double:
self.del_edge((edge[1], edge[0])) | 824,725 |
Adds **node objects** to the graph.
Arguments:
- nodes(iterable) Sequence of **node objects** to be added to the
``DictGraph``
- xtras(iterable) [default: ``None``] Sequence of ``Node.xtra``
dictionaries corresponding to the **node... | def add_nodes(self, nodes, xtras=None):
for node, xtra in izip(nodes, (xtras or repeat(None))):
self.add_node(node, xtra) | 824,726 |
Adds edges to the graph. Takes optional arguments for
``DictGraph.add_edge``.
Arguments:
- edges(iterable) Sequence of edges to be added to the
``DictGraph``. | def add_edges(self, edges, *args, **kwargs):
for edge in edges:
self.add_edge(edge, *args, **kwargs) | 824,727 |
Removes edges from the graph. Takes optional arguments for
``DictGraph.del_edge``.
Arguments:
- edges(iterable) Sequence of edges to be removed from the
``DictGraph``. | def del_edges(self, edges, *args, **kwargs):
for edge in edges:
self.del_edge(edge, *args, **kwargs) | 824,728 |
Returns a ``tuple`` of all edges in the ``DictGraph`` an edge is a pair
of **node objects**.
Arguments:
- nodes(iterable) [default: ``None``] iterable of **node objects** if
specified the edges will be limited to those outgoing from one of
the specif... | def edges(self, nodes=None):
# If a Node has been directly updated (__not__ recommended)
# then the Graph will not know the added nodes and therefore will
# miss half of their edges.
edges = set()
for node in (nodes or self.iterkeys()):
ends = self[node].node... | 824,729 |
Returns a ``tuple`` of incoming edges for a **node object**.
Arguments:
- node(``object``) **node object** present in the graph to be queried
for incoming edges. | def incoming_edges(self, node):
edges = self.edges()
in_edges = []
for out_node, in_node in edges:
if node is in_node:
in_edges.append((out_node, in_node))
return tuple(in_edges) | 824,730 |
Returns a ``tuple`` of outgoing edges for a **node object**.
Arguments:
- node(``object``) **node object** present in the graph to be queried
for outgoing edges. | def outgoing_edges(self, node):
#TODO: pls make outgoig_edges less insane
edges = self.edges()
out_edges = []
for out_node, in_node in edges:
if node is out_node:
out_edges.append((out_node, in_node))
return tuple(out_edges) | 824,731 |
Passes inputs with indecies in s. By default passes the whole inbox.
Arguments:
- s(sequence) [default: ``None``] The default translates to a range for
all inputs of the "inbox" i.e. ``range(len(inbox))`` | def spasser(inbox, s=None):
seq = (s or range(len(inbox)))
return [input_ for i, input_ in enumerate(inbox) if i in seq] | 824,782 |
Zips inputs from inbox with indicies in "s". By default zips the whole
inbox (all indices).
Arguments:
- s(sequence) [default: ``None``] | def szipper(inbox, s=None):
#TODO: test szipper
return zip(*[input_ for i, input_ in enumerate(inbox) if i in s]) | 824,783 |
String joins input with indices in s.
Arguments:
- s(sequence) [default: ``None``] ``tuple`` or ``list`` of indices of the
elements which will be joined.
- join(``str``) [default: ``""``] String which will join the elements of
the inbox i.e. ``join.join()``. | def sjoiner(inbox, s=None, join=""):
return join.join([input_ for i, input_ in enumerate(inbox) if i in s]) | 824,784 |
Creates a string generator from a stream (file handle) containing data
delimited by the delimiter strings. This is a stand-alone function and
should be used to feed external data into a pipeline.
Arguments:
- hande(``file``) A file handle open for reading.
- delimiter(``str``) [default: ``No... | def load_stream(handle, delimiter=None):
delimiter = (delimiter or "") + "\n"
while True:
item = []
while True:
line = handle.readline()
if line == "":
raise StopIteration
elif line == delimiter:
if item:
... | 824,786 |
Creates a line generator from a stream (file handle) containing data in
lines.
Arguments:
- follow(``bool``) [default: ``False``] If ``True`` follows the file after
it finishes like 'tail -f'.
- wait(``float``) [default: ``0.1``] time to wait in seconds between file
polls. | def make_lines(handle, follow=False, wait=0.1):
while True:
line = handle.readline()
if line:
yield line
elif follow:
time.sleep(wait)
else:
raise StopIteration | 824,789 |
Read links and associated categories for specified articles
in text file seperated by a space
Args:
corpus_dir (str): The directory to save the generated corpus
datastore_type (Optional[str]): Format to save generated corpus.
Specify ... | def __init__(self,corpus_dir,datastore_type='file',db_name='corpus.db'):
self.g = Goose({'browser_user_agent': 'Mozilla','parser_class':'soup'})
#self.g = Goose({'browser_user_agent': 'Mozilla'})
self.corpus_dir = corpus_dir
self.datastore_type = datastore_type
self.db_... | 824,886 |
Read links and associated categories for specified articles
in text file seperated by a space
Args:
file_path (str): The path to text file with news article links
and category
Returns:
articles: Array of tuples that contains article link & ... | def read_links_file(self,file_path):
articles = []
with open(file_path) as f:
for line in f:
line = line.strip()
#Ignore blank lines
if len(line) != 0:
link,category = line.split(' ')
articles.a... | 824,888 |
Setup integration
Register plug-ins and integrate into the host
Arguments:
console (bool): DEPRECATED
port (int, optional): DEPRECATED | def setup(console=False, port=None):
if self._has_been_setup:
teardown()
register_plugins()
register_host()
self._has_been_setup = True
print("pyblish: Pyblish loaded successfully.") | 825,969 |
Create new sketch
Params:
<int> width
<str> path
<int> flags
<int> seed | def create(self, width=0, depth=0, path=None, flags=0, seed=0):
return self.create_method(self, width, depth, path, flags, seed) | 825,990 |
Add key-value
Params:
<str> key
<int> key_length
Return:
<int> key_value | def get(self, key, key_length=0):
if key_length < 1:
key_length = len(key)
return self.get_method(self, key, key_length) | 825,992 |
Set value to key-value
Params:
<str> key
<int> value
<int> key_length
Return:
<int> key_value | def set(self, key, value, key_length=0):
if key_length < 1:
key_length = len(key)
if self.k:
self._update(key, value)
return self.set_method(self, key, key_length, value) | 825,993 |
Add value to key-value
Params:
<str> key
<int> value
<int> key_length
Return:
<int> key_value | def add(self, key, value, key_length=0):
if key_length < 1:
key_length = len(key)
val = self.add_method(self, key, key_length, value)
if self.k:
self._update(key, value)
return val | 825,994 |
Add value to key-value
Params:
<str> key
<int> value
<int> key_length
Return:
<int> key_value | def inc(self, key, key_length=0):
if key_length < 1:
key_length = len(key)
val = self.add_method(self, key, key_length, 1)
if self.k:
self._update(key, val)
return val | 825,995 |
Shrink sketch
Params:
<Sketch> src_sketch
<int> width
<str> path
<int> flags | def shrink(self, src, width=0, path=None, flags=0):
self.shrink_method(self, src, width, path, flags) | 825,997 |
Merge two sketches
Params:
<Sketch> sketch
<lambda> | <function> lhs_filter
<lambda> | <function> rhs_filter | def merge(self, rhs, lhs_filter=None, rhs_filter=None):
if lhs_filter or rhs_filter:
get_ = self.get___method
set_ = self.set___method
max_value = _madoka.Sketch_max_value(self)
for table_id in range(self.depth):
for cell_id in range(self.... | 825,998 |
Set values from dict
Params:
<dict <str> <int>> src_dict | def fromdict(self, src_dict, method='set'):
if method == 'set':
_method = self.set_method
else:
_method = self.add_method
if hasattr(src_dict, 'iteritems'):
for (key, val) in src_dict.iteritems():
_method(self, key, len(key), val)
... | 826,001 |
Create new sketch
Params:
<int> width
<int> max_value
<str> path
<int> flags
<int> seed | def create(self, width=0, max_value=0, path=None, flags=0, seed=0):
return _madoka.Sketch_create(self, width, max_value, path, flags, seed) | 826,005 |
Increment key-value
Params:
<str> key
<int> key_length
Return:
<int> key_value | def inc(self, key, key_length=0):
if key_length < 1:
key_length = len(key)
return _madoka.Sketch_inc(self, key, key_length) | 826,006 |
Shrink sketch
Params:
<Sketch> src_sketch
<int> width
<int> max_value
<lambda> | <function> filter
<str> path
<int> flags | def shrink(self, src, width=0, max_value=0, filter_method=None,
path=None, flags=0):
if filter_method:
get_ = _madoka.Sketch_get__
set_ = _madoka.Sketch_set__
new_sketch = Sketch(width, max_value, path, flags, src.seed)
for table_id in rang... | 826,007 |
The initializer sets up stuff to do the work
Args:
dict of args
Returns:
kwarg[Profile]: asdasdf
Raises:
SystemError if thing are not all good | def __init__(self, **kwargs):
try:
self.nap_time = int(os.environ.get('CSU_POLL_INTERVAL', 30))
except Exception:
self.nap_time = 15
self._stack_name = kwargs.get('Stack')
self._verbose = kwargs.get('Verbose', False)
if not self._stack_name:
... | 826,065 |
The utililty requires boto3 clients to CloudFormation.
Args:
None
Returns:
Good or Bad; True or False | def _init_boto3_clients(self, profile, region):
try:
session = None
if profile and region:
session = boto3.session.Session(profile_name=profile, region_name=region)
elif profile:
session = boto3.session.Session(profile_name=profile)
... | 826,066 |
Determine the drift of the stack.
Args:
None
Returns:
Good or Bad; True or False | def determine_drift(self):
try:
response = self._cloud_formation.detect_stack_drift(StackName=self._stack_name)
drift_request_id = response.get('StackDriftDetectionId', None)
if drift_request_id:
logging.info('drift_request_id: %s - polling', drift_re... | 826,067 |
Report the drift of the stack.
Args:
None
Returns:
Good or Bad; True or False
Note: not yet implemented | def _print_drift_report(self):
try:
response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)
rows = []
for resource in response.get('StackResources', []):
row = []
row.append(resource.get('LogicalResourceId... | 826,068 |
Parse a file-like object or string.
Args:
file_or_string (file, str): File-like object or string.
Returns:
ParseResults: instance of pyparsing parse results. | def parse(file_or_string):
from mysqlparse.grammar.sql_file import sql_file_syntax
if hasattr(file_or_string, 'read') and hasattr(file_or_string.read, '__call__'):
return sql_file_syntax.parseString(file_or_string.read())
elif isinstance(file_or_string, six.string_types):
return sql_fi... | 826,073 |
Return True if OS name in /etc/lsb-release of host given by fabric param
`-H` is the same as given by argument, False else.
If arg version_id is not None only return True if it is the same as in
/etc/lsb-release, too.
Args:
name: 'Debian GNU/Linux', 'Ubuntu'
version_id(None or str): No... | def is_os(name, version_id=None):
result = False
os_release_infos = _fetch_os_release_infos()
if name == os_release_infos.get('name', None):
if version_id is None:
result = True
elif version_id == os_release_infos.get('version_id', None):
result = True
ret... | 826,209 |
Determine latest stable python versions and return them as a list of str.
Args:
minors([<str>,..]): List of python minor versions as str, eg.
['2.6', '2.7', '3.3', '3.4', '3.5', '3.6']
Return example:
['2.6.9', '2.7.14', '3.3.7', '3.4.8', '3.5.5', '3.6.4'] | def determine_latest_pythons(minors):
# eg: ['2.6.9', '2.7.14', '3.3.7', '3.4.8', '3.5.5', '3.6.4']
latests = []
versions_str = fabric.api.local(flo(
'pyenv install --list | tr -d [:blank:] | '
'grep -P "^[\d\.]+$"'), capture=True)
versions = versions_str.split()
for minor in ... | 826,212 |
Install or update Janus, a distribution of addons and mappings for vim.
More info:
https://github.com/carlhuda/janus
Customization: https://github.com/carlhuda/janus/wiki/Customization
Args:
uninstall: If not None, Uninstall janus and restore old vim config | def vim_janus(uninstall=None):
if uninstall is not None:
uninstall_janus()
else:
if not exists('~/.vim/janus'):
print_msg('not installed => install')
install_janus()
else:
print_msg('already installed => update')
update_janus()
... | 826,214 |
Helper function to facilitate upsert.
Args:
ini_date - the dictionary of info to run upsert
Exit:
0 - good
1 - bad | def start_upsert(ini_data):
stack_driver = CloudStackUtility(ini_data)
poll_stack = not ini_data.get('no_poll', False)
if stack_driver.upsert():
logging.info('stack create/update was started successfully.')
if poll_stack:
stack_tool = None
try:
p... | 826,227 |
Read the INI file
Args:
ini_file - path to the file
Returns:
A dictionary of stuff from the INI file
Exits:
1 - if problems are encountered | def read_config_info(ini_file):
try:
config = RawConfigParser()
config.optionxform = lambda option: option
config.read(ini_file)
the_stuff = {}
for section in config.sections():
the_stuff[section] = {}
for option in config.options(section):
... | 826,228 |
StackTool is a simple tool to print some specific data about a
CloudFormation stack.
Args:
stack_name - name of the stack of interest
region - AWS region where the stack was created
Returns:
not a damn thing
Raises:
SystemError - if every... | def __init__(self, stack_name, region, cf_client):
try:
self._stack_name = stack_name
self._region = region
self._cf_client = cf_client
except Exception:
raise SystemError | 826,230 |
List resources from the given stack
Args:
None
Returns:
A dictionary filled resources or None if things went sideways | def print_stack_info(self):
try:
rest_api_id = None
deployment_found = False
response = self._cf_client.describe_stack_resources(
StackName=self._stack_name
)
print('\nThe following resources were created:')
rows ... | 826,231 |
List events from the given stack
Args:
None
Returns:
None | def print_stack_events(self):
first_token = '7be7981bd6287dd8112305e8f3822a6f'
keep_going = True
next_token = first_token
current_request_token = None
rows = []
try:
while keep_going and next_token:
if next_token == first_token:
... | 826,232 |
write data to cache file
parameters:
cache_path - path to cache file
content - a data structure to save into cache file | def write(content, filename='cache'):
cache_path = get_cache_path(filename)
with open(cache_path, 'w') as file:
if content is not None:
json.dump(content, file, indent=3, sort_keys=True) | 826,530 |
Recursively iterate through 'package_module' and add every fabric task
to the 'addon_module' keeping the task hierarchy.
Args:
addon_module(types.ModuleType)
package_module(types.ModuleType)
package_name(str): Required, to avoid redundant addition of tasks
Return: None | def add_tasks_r(addon_module, package_module, package_name):
module_dict = package_module.__dict__
for attr_name, attr_val in module_dict.items():
if isinstance(attr_val, fabric.tasks.WrappedCallableTask):
addon_module.__dict__[attr_name] = attr_val
elif attr_name != package_n... | 826,546 |
Load an fabsetup addon given by 'package_name' and hook it in the
base task namespace 'username'.
Args:
username(str)
package_name(str)
_globals(dict): the globals() namespace of the fabric script.
Return: None | def load_addon(username, package_name, _globals):
addon_module = get_or_create_module_r(username)
package_module = __import__(package_name)
add_tasks_r(addon_module, package_module, package_name)
_globals.update({username: addon_module})
del package_module
del addon_module | 826,547 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.