code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def getContext(self, context_name = 'default'):
"""Get a context by name, create the default context if it does not exist
Params:
context_name (string):
Context name
Raises:
KeyError:
If the context name does not exist
Returns:
bubbler.Bubbler:
Named context
"""
if context_name == 'default' and 'default' not in self.contexts:
self('default')
return self.contexts[context_name] | Get a context by name, create the default context if it does not exist
Params:
context_name (string):
Context name
Raises:
KeyError:
If the context name does not exist
Returns:
bubbler.Bubbler:
Named context | Below is the instruction that describes the task:
### Input:
Get a context by name, create the default context if it does not exist
Params:
context_name (string):
Context name
Raises:
KeyError:
If the context name does not exist
Returns:
bubbler.Bubbler:
Named context
### Response:
def getContext(self, context_name = 'default'):
    """Get a context by name, create the default context if it does not exist
    Params:
        context_name (string):
            Context name
    Raises:
        KeyError:
            If the context name does not exist
    Returns:
        bubbler.Bubbler:
            Named context
    """
    # Lazily create the 'default' context on first access; calling the
    # instance itself (self(...)) is how a new context gets registered.
    if context_name == 'default' and 'default' not in self.contexts:
        self('default')
    # Any other unknown name raises KeyError, as documented above.
    return self.contexts[context_name] |
def resume_service(name):
"""
Resume the service given by name.
@warn: This method requires UAC elevation in Windows Vista and above.
@note: Not all services support this.
@see: L{get_services}, L{get_active_services},
L{start_service}, L{stop_service}, L{pause_service}
"""
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_CONNECT
) as hSCManager:
with win32.OpenService(hSCManager, name,
dwDesiredAccess = win32.SERVICE_PAUSE_CONTINUE
) as hService:
win32.ControlService(hService, win32.SERVICE_CONTROL_CONTINUE) | Resume the service given by name.
@warn: This method requires UAC elevation in Windows Vista and above.
@note: Not all services support this.
@see: L{get_services}, L{get_active_services},
L{start_service}, L{stop_service}, L{pause_service} | Below is the instruction that describes the task:
### Input:
Resume the service given by name.
@warn: This method requires UAC elevation in Windows Vista and above.
@note: Not all services support this.
@see: L{get_services}, L{get_active_services},
L{start_service}, L{stop_service}, L{pause_service}
### Response:
def resume_service(name):
    """
    Resume the service given by name.
    @warn: This method requires UAC elevation in Windows Vista and above.
    @note: Not all services support this.
    @see: L{get_services}, L{get_active_services},
        L{start_service}, L{stop_service}, L{pause_service}
    """
    # Connect to the Service Control Manager with the minimal access right.
    with win32.OpenSCManager(
        dwDesiredAccess = win32.SC_MANAGER_CONNECT
    ) as hSCManager:
        # Open the target service requesting only pause/continue control.
        with win32.OpenService(hSCManager, name,
            dwDesiredAccess = win32.SERVICE_PAUSE_CONTINUE
        ) as hService:
            # CONTINUE resumes a previously paused service.
            win32.ControlService(hService, win32.SERVICE_CONTROL_CONTINUE) |
def add_real_paths(self, path_list, read_only=True, lazy_dir_read=True):
"""This convenience method adds multiple files and/or directories from
the real file system to the fake file system. See `add_real_file()` and
`add_real_directory()`.
Args:
path_list: List of file and directory paths in the real file
system.
read_only: If set, all files and files under under the directories
are treated as read-only, e.g. a write access raises an
exception; otherwise, writing to the files changes the fake
files only as usually.
lazy_dir_read: Uses lazy reading of directory contents if set
(see `add_real_directory`)
Raises:
OSError: if any of the files and directories in the list
does not exist in the real file system.
OSError: if any of the files and directories in the list
already exists in the fake file system.
"""
for path in path_list:
if os.path.isdir(path):
self.add_real_directory(path, read_only, lazy_dir_read)
else:
self.add_real_file(path, read_only) | This convenience method adds multiple files and/or directories from
the real file system to the fake file system. See `add_real_file()` and
`add_real_directory()`.
Args:
path_list: List of file and directory paths in the real file
system.
read_only: If set, all files and files under under the directories
are treated as read-only, e.g. a write access raises an
exception; otherwise, writing to the files changes the fake
files only as usually.
lazy_dir_read: Uses lazy reading of directory contents if set
(see `add_real_directory`)
Raises:
OSError: if any of the files and directories in the list
does not exist in the real file system.
OSError: if any of the files and directories in the list
already exists in the fake file system. | Below is the instruction that describes the task:
### Input:
This convenience method adds multiple files and/or directories from
the real file system to the fake file system. See `add_real_file()` and
`add_real_directory()`.
Args:
path_list: List of file and directory paths in the real file
system.
read_only: If set, all files and files under under the directories
are treated as read-only, e.g. a write access raises an
exception; otherwise, writing to the files changes the fake
files only as usually.
lazy_dir_read: Uses lazy reading of directory contents if set
(see `add_real_directory`)
Raises:
OSError: if any of the files and directories in the list
does not exist in the real file system.
OSError: if any of the files and directories in the list
already exists in the fake file system.
### Response:
def add_real_paths(self, path_list, read_only=True, lazy_dir_read=True):
    """This convenience method adds multiple files and/or directories from
    the real file system to the fake file system. See `add_real_file()` and
    `add_real_directory()`.
    Args:
        path_list: List of file and directory paths in the real file
            system.
        read_only: If set, all files and files under the directories
            are treated as read-only, e.g. a write access raises an
            exception; otherwise, writing to the files changes the fake
            files only as usually.
        lazy_dir_read: Uses lazy reading of directory contents if set
            (see `add_real_directory`)
    Raises:
        OSError: if any of the files and directories in the list
            does not exist in the real file system.
        OSError: if any of the files and directories in the list
            already exists in the fake file system.
    """
    # Dispatch each path to the matching single-item helper.
    for path in path_list:
        if os.path.isdir(path):
            self.add_real_directory(path, read_only, lazy_dir_read)
        else:
            self.add_real_file(path, read_only) |
def exclusive_ns(guard: StateGuard[A], desc: str, thunk: Callable[..., NS[A, B]], *a: Any) -> Do:
'''this is the central unsafe function, using a lock and updating the state in `guard` in-place.
'''
yield guard.acquire()
log.debug2(lambda: f'exclusive: {desc}')
state, response = yield N.ensure_failure(thunk(*a).run(guard.state), guard.release)
yield N.delay(lambda v: unsafe_update_state(guard, state))
yield guard.release()
log.debug2(lambda: f'release: {desc}')
yield N.pure(response) | this is the central unsafe function, using a lock and updating the state in `guard` in-place. | Below is the instruction that describes the task:
### Input:
this is the central unsafe function, using a lock and updating the state in `guard` in-place.
### Response:
def exclusive_ns(guard: StateGuard[A], desc: str, thunk: Callable[..., NS[A, B]], *a: Any) -> Do:
    '''this is the central unsafe function, using a lock and updating the state in `guard` in-place.
    '''
    # Take the guard's lock before running the stateful computation.
    yield guard.acquire()
    log.debug2(lambda: f'exclusive: {desc}')
    # ensure_failure releases the lock even if the thunk fails.
    state, response = yield N.ensure_failure(thunk(*a).run(guard.state), guard.release)
    # Commit the new state in-place -- the "unsafe" part of this function.
    yield N.delay(lambda v: unsafe_update_state(guard, state))
    yield guard.release()
    log.debug2(lambda: f'release: {desc}')
    yield N.pure(response) |
def serialize(self, value, **kwargs):
"""Return a serialized copy of the tuple"""
kwargs.update({'include_class': kwargs.get('include_class', True)})
if self.serializer is not None:
return self.serializer(value, **kwargs)
if value is None:
return None
serial_list = [self.prop.serialize(val, **kwargs)
for val in value]
return serial_list | Return a serialized copy of the tuple | Below is the instruction that describes the task:
### Input:
Return a serialized copy of the tuple
### Response:
def serialize(self, value, **kwargs):
    """Return a serialized copy of the tuple"""
    # Embed class information by default unless the caller opted out.
    kwargs.update({'include_class': kwargs.get('include_class', True)})
    # A custom serializer, when configured, takes precedence entirely.
    if self.serializer is not None:
        return self.serializer(value, **kwargs)
    if value is None:
        return None
    # Serialize element-wise using the contained property's serializer.
    serial_list = [self.prop.serialize(val, **kwargs)
                   for val in value]
    return serial_list |
def getValue(self, prop, default=None):
"""Return the value of feature with that name or ``default``."""
f = self.props.get(prop, None)
if not f:
return default
if isinstance(f, Feature):
return f.getValue()
if isinstance(f, tuple):
# if f[0].getValue() == f[1].getValue():
# return f[0].getValue()
# Miguel: para poder probar
if f[0]:
return f[0].getValue()
elif f[1]:
return f[1].getValue()
raise Exception("Getting value from a property with a constrain")
return f | Return the value of feature with that name or ``default``. | Below is the instruction that describes the task:
### Input:
Return the value of feature with that name or ``default``.
### Response:
def getValue(self, prop, default=None):
    """Return the value of feature with that name or ``default``."""
    f = self.props.get(prop, None)
    # Falsy entry (missing feature) falls back to the default.
    if not f:
        return default
    if isinstance(f, Feature):
        return f.getValue()
    if isinstance(f, tuple):
        # if f[0].getValue() == f[1].getValue():
        #     return f[0].getValue()
        # Miguel: so it can be tested (original comment: "para poder probar")
        # Prefer whichever side of the constrained pair is set.
        if f[0]:
            return f[0].getValue()
        elif f[1]:
            return f[1].getValue()
        raise Exception("Getting value from a property with a constrain")
    # Plain stored value: return it as-is.
    return f |
def subset(self, interval: Interval,
flexibility: int = 2) -> "IntervalList":
"""
Returns an IntervalList that's a subset of this one, only containing
intervals that meet the "interval" parameter criterion. What "meet"
means is defined by the ``flexibility`` parameter.
``flexibility == 0``: permits only wholly contained intervals:
.. code-block:: none
interval:
I----------------I
intervals in self that will/won't be returned:
N---N N---N Y---Y N---N N---N
N---N N---N
``flexibility == 1``: permits overlapping intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
N---N N---N
``flexibility == 2``: permits adjoining intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
Y---Y Y---Y
"""
if flexibility not in [0, 1, 2]:
raise ValueError("subset: bad flexibility value")
permitted = []
for i in self.intervals:
if flexibility == 0:
ok = i.start > interval.start and i.end < interval.end
elif flexibility == 1:
ok = i.end > interval.start and i.start < interval.end
else:
ok = i.end >= interval.start and i.start <= interval.end
if ok:
permitted.append(i)
return IntervalList(permitted) | Returns an IntervalList that's a subset of this one, only containing
intervals that meet the "interval" parameter criterion. What "meet"
means is defined by the ``flexibility`` parameter.
``flexibility == 0``: permits only wholly contained intervals:
.. code-block:: none
interval:
I----------------I
intervals in self that will/won't be returned:
N---N N---N Y---Y N---N N---N
N---N N---N
``flexibility == 1``: permits overlapping intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
N---N N---N
``flexibility == 2``: permits adjoining intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
Y---Y Y---Y | Below is the instruction that describes the task:
### Input:
Returns an IntervalList that's a subset of this one, only containing
intervals that meet the "interval" parameter criterion. What "meet"
means is defined by the ``flexibility`` parameter.
``flexibility == 0``: permits only wholly contained intervals:
.. code-block:: none
interval:
I----------------I
intervals in self that will/won't be returned:
N---N N---N Y---Y N---N N---N
N---N N---N
``flexibility == 1``: permits overlapping intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
N---N N---N
``flexibility == 2``: permits adjoining intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
Y---Y Y---Y
### Response:
def subset(self, interval: Interval,
           flexibility: int = 2) -> "IntervalList":
    """
    Returns an IntervalList that's a subset of this one, only containing
    intervals that meet the "interval" parameter criterion. What "meet"
    means is defined by the ``flexibility`` parameter.
    ``flexibility == 0``: permits only wholly contained intervals:
    .. code-block:: none
        interval:
            I----------------I
        intervals in self that will/won't be returned:
            N---N N---N Y---Y N---N N---N
            N---N N---N
    ``flexibility == 1``: permits overlapping intervals as well:
    .. code-block:: none
            I----------------I
            N---N Y---Y Y---Y Y---Y N---N
            N---N N---N
    ``flexibility == 2``: permits adjoining intervals as well:
    .. code-block:: none
            I----------------I
            N---N Y---Y Y---Y Y---Y N---N
            Y---Y Y---Y
    """
    if flexibility not in [0, 1, 2]:
        raise ValueError("subset: bad flexibility value")
    permitted = []
    for i in self.intervals:
        if flexibility == 0:
            # Strictly contained inside the query interval.
            ok = i.start > interval.start and i.end < interval.end
        elif flexibility == 1:
            # Strict overlap with the query interval.
            ok = i.end > interval.start and i.start < interval.end
        else:
            # Overlapping or merely touching (adjoining) the interval.
            ok = i.end >= interval.start and i.start <= interval.end
        if ok:
            permitted.append(i)
    return IntervalList(permitted) |
def run(self, manager):
"""Run this query and returns the resulting BEL graph.
:param manager: A cache manager
:rtype: Optional[pybel.BELGraph]
"""
universe = self._get_universe(manager)
graph = self.seeding.run(universe)
return self.pipeline.run(graph, universe=universe) | Run this query and returns the resulting BEL graph.
:param manager: A cache manager
:rtype: Optional[pybel.BELGraph] | Below is the instruction that describes the task:
### Input:
Run this query and returns the resulting BEL graph.
:param manager: A cache manager
:rtype: Optional[pybel.BELGraph]
### Response:
def run(self, manager):
    """Run this query and returns the resulting BEL graph.
    :param manager: A cache manager
    :rtype: Optional[pybel.BELGraph]
    """
    # Assemble the full universe graph, seed a subgraph from it, then
    # apply the post-processing pipeline against that subgraph.
    universe = self._get_universe(manager)
    graph = self.seeding.run(universe)
    return self.pipeline.run(graph, universe=universe) |
def remove_builtin(self, key, orig):
"""Remove an added builtin and re-set the original."""
if orig is BuiltinUndefined:
del __builtin__.__dict__[key]
else:
__builtin__.__dict__[key] = orig | Remove an added builtin and re-set the original. | Below is the instruction that describes the task:
### Input:
Remove an added builtin and re-set the original.
### Response:
def remove_builtin(self, key, orig):
    """Remove an added builtin and re-set the original."""
    # The sentinel means the name did not exist before we added it,
    # so removal (rather than restoration) is the correct undo.
    if orig is BuiltinUndefined:
        del __builtin__.__dict__[key]
    else:
        __builtin__.__dict__[key] = orig |
def memoized_property(func=None, key_factory=per_instance, **kwargs):
"""A convenience wrapper for memoizing properties.
Applied like so:
>>> class Foo(object):
... @memoized_property
... def name(self):
... pass
Is equivalent to:
>>> class Foo(object):
... @property
... @memoized_method
... def name(self):
... pass
Which is equivalent to:
>>> class Foo(object):
... @property
... @memoized(key_factory=per_instance)
... def name(self):
... pass
By default a deleter for the property is setup that un-caches the property such that a subsequent
property access re-computes the value. In other words, for this `now` @memoized_property:
>>> import time
>>> class Bar(object):
... @memoized_property
... def now(self):
... return time.time()
You could write code like so:
>>> bar = Bar()
>>> bar.now
1433267312.622095
>>> time.sleep(5)
>>> bar.now
1433267312.622095
>>> del bar.now
>>> bar.now
1433267424.056189
>>> time.sleep(5)
>>> bar.now
1433267424.056189
>>>
:API: public
:param func: The property getter method to wrap. Only generally passed by the python runtime and
should be omitted when passing a custom `key_factory` or `cache_factory`.
:param key_factory: A function that can form a cache key from the arguments passed to the
wrapped, memoized function; by default `per_instance`.
:param kwargs: Any extra keyword args accepted by `memoized`.
:raises: `ValueError` if the wrapper is applied to anything other than a function.
:returns: A read-only property that memoizes its calculated value and un-caches its value when
`del`ed.
"""
getter = memoized_method(func=func, key_factory=key_factory, **kwargs)
return property(fget=getter, fdel=lambda self: getter.forget(self)) | A convenience wrapper for memoizing properties.
Applied like so:
>>> class Foo(object):
... @memoized_property
... def name(self):
... pass
Is equivalent to:
>>> class Foo(object):
... @property
... @memoized_method
... def name(self):
... pass
Which is equivalent to:
>>> class Foo(object):
... @property
... @memoized(key_factory=per_instance)
... def name(self):
... pass
By default a deleter for the property is setup that un-caches the property such that a subsequent
property access re-computes the value. In other words, for this `now` @memoized_property:
>>> import time
>>> class Bar(object):
... @memoized_property
... def now(self):
... return time.time()
You could write code like so:
>>> bar = Bar()
>>> bar.now
1433267312.622095
>>> time.sleep(5)
>>> bar.now
1433267312.622095
>>> del bar.now
>>> bar.now
1433267424.056189
>>> time.sleep(5)
>>> bar.now
1433267424.056189
>>>
:API: public
:param func: The property getter method to wrap. Only generally passed by the python runtime and
should be omitted when passing a custom `key_factory` or `cache_factory`.
:param key_factory: A function that can form a cache key from the arguments passed to the
wrapped, memoized function; by default `per_instance`.
:param kwargs: Any extra keyword args accepted by `memoized`.
:raises: `ValueError` if the wrapper is applied to anything other than a function.
:returns: A read-only property that memoizes its calculated value and un-caches its value when
`del`ed. | Below is the instruction that describes the task:
### Input:
A convenience wrapper for memoizing properties.
Applied like so:
>>> class Foo(object):
... @memoized_property
... def name(self):
... pass
Is equivalent to:
>>> class Foo(object):
... @property
... @memoized_method
... def name(self):
... pass
Which is equivalent to:
>>> class Foo(object):
... @property
... @memoized(key_factory=per_instance)
... def name(self):
... pass
By default a deleter for the property is setup that un-caches the property such that a subsequent
property access re-computes the value. In other words, for this `now` @memoized_property:
>>> import time
>>> class Bar(object):
... @memoized_property
... def now(self):
... return time.time()
You could write code like so:
>>> bar = Bar()
>>> bar.now
1433267312.622095
>>> time.sleep(5)
>>> bar.now
1433267312.622095
>>> del bar.now
>>> bar.now
1433267424.056189
>>> time.sleep(5)
>>> bar.now
1433267424.056189
>>>
:API: public
:param func: The property getter method to wrap. Only generally passed by the python runtime and
should be omitted when passing a custom `key_factory` or `cache_factory`.
:param key_factory: A function that can form a cache key from the arguments passed to the
wrapped, memoized function; by default `per_instance`.
:param kwargs: Any extra keyword args accepted by `memoized`.
:raises: `ValueError` if the wrapper is applied to anything other than a function.
:returns: A read-only property that memoizes its calculated value and un-caches its value when
`del`ed.
### Response:
def memoized_property(func=None, key_factory=per_instance, **kwargs):
    """A convenience wrapper for memoizing properties.
    Applied like so:
    >>> class Foo(object):
    ...   @memoized_property
    ...   def name(self):
    ...     pass
    Is equivalent to:
    >>> class Foo(object):
    ...   @property
    ...   @memoized_method
    ...   def name(self):
    ...     pass
    Which is equivalent to:
    >>> class Foo(object):
    ...   @property
    ...   @memoized(key_factory=per_instance)
    ...   def name(self):
    ...     pass
    By default a deleter for the property is setup that un-caches the property such that a subsequent
    property access re-computes the value. In other words, for this `now` @memoized_property:
    >>> import time
    >>> class Bar(object):
    ...   @memoized_property
    ...   def now(self):
    ...     return time.time()
    You could write code like so:
    >>> bar = Bar()
    >>> bar.now
    1433267312.622095
    >>> time.sleep(5)
    >>> bar.now
    1433267312.622095
    >>> del bar.now
    >>> bar.now
    1433267424.056189
    >>> time.sleep(5)
    >>> bar.now
    1433267424.056189
    >>>
    :API: public
    :param func: The property getter method to wrap. Only generally passed by the python runtime and
                 should be omitted when passing a custom `key_factory` or `cache_factory`.
    :param key_factory: A function that can form a cache key from the arguments passed to the
                        wrapped, memoized function; by default `per_instance`.
    :param kwargs: Any extra keyword args accepted by `memoized`.
    :raises: `ValueError` if the wrapper is applied to anything other than a function.
    :returns: A read-only property that memoizes its calculated value and un-caches its value when
              `del`ed.
    """
    # Memoize per instance, then expose as a property whose deleter
    # evicts the cached value for that particular instance.
    getter = memoized_method(func=func, key_factory=key_factory, **kwargs)
    return property(fget=getter, fdel=lambda self: getter.forget(self)) |
def filter_reports(self, analytes, filt_str='all', nbin=5, samples=None,
outdir=None, subset='All_Samples'):
"""
Plot filter reports for all filters that contain ``filt_str``
in the name.
"""
if outdir is None:
outdir = self.report_dir + '/filters/' + filt_str
if not os.path.isdir(self.report_dir + '/filters'):
os.mkdir(self.report_dir + '/filters')
if not os.path.isdir(outdir):
os.mkdir(outdir)
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
_ = self.data[s].filter_report(filt=filt_str,
analytes=analytes,
savedir=outdir,
nbin=nbin)
prog.update()
# plt.close(fig)
return | Plot filter reports for all filters that contain ``filt_str``
in the name. | Below is the instruction that describes the task:
### Input:
Plot filter reports for all filters that contain ``filt_str``
in the name.
### Response:
def filter_reports(self, analytes, filt_str='all', nbin=5, samples=None,
                   outdir=None, subset='All_Samples'):
    """
    Plot filter reports for all filters that contain ``filt_str``
    in the name.
    """
    # Default output location is <report_dir>/filters/<filt_str>;
    # create the directory hierarchy on demand.
    if outdir is None:
        outdir = self.report_dir + '/filters/' + filt_str
    if not os.path.isdir(self.report_dir + '/filters'):
        os.mkdir(self.report_dir + '/filters')
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    # An explicit sample list overrides the named subset.
    if samples is not None:
        subset = self.make_subset(samples)
    samples = self._get_samples(subset)
    # Draw one filter report per sample, with a progress bar.
    with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
        for s in samples:
            _ = self.data[s].filter_report(filt=filt_str,
                                           analytes=analytes,
                                           savedir=outdir,
                                           nbin=nbin)
            prog.update()
            # plt.close(fig)
    return |
def wait_not_displayed(element, timeout=None, fail_on_timeout=None):
"""
Wait until element becomes invisible or time out.
Returns true is element became invisible, otherwise false.
If timeout is not specified or 0, then uses specific element wait timeout.
:param element:
:param timeout:
:param fail_on_timeout:
:return:
"""
return wait(lambda: not element.is_displayed(), timeout or element.wait_timeout, fail_on_timeout) | Wait until element becomes invisible or time out.
Returns true is element became invisible, otherwise false.
If timeout is not specified or 0, then uses specific element wait timeout.
:param element:
:param timeout:
:param fail_on_timeout:
:return: | Below is the instruction that describes the task:
### Input:
Wait until element becomes invisible or time out.
Returns true is element became invisible, otherwise false.
If timeout is not specified or 0, then uses specific element wait timeout.
:param element:
:param timeout:
:param fail_on_timeout:
:return:
### Response:
def wait_not_displayed(element, timeout=None, fail_on_timeout=None):
    """
    Wait until element becomes invisible or time out.
    Returns true if element became invisible, otherwise false.
    If timeout is not specified or 0, then uses specific element wait timeout.
    :param element: element to watch
    :param timeout: maximum time to wait; falls back to the element's own
        ``wait_timeout`` when falsy
    :param fail_on_timeout: passed through to ``wait`` -- presumably raises
        instead of returning False on timeout (TODO confirm against wait())
    :return: True if the element became invisible before timing out
    """
    # Poll the negated visibility predicate via the generic wait() helper.
    return wait(lambda: not element.is_displayed(), timeout or element.wait_timeout, fail_on_timeout) |
def writeToCheckpoint(self, checkpointDir):
"""Serializes model using capnproto and writes data to ``checkpointDir``"""
proto = self.getSchema().new_message()
self.write(proto)
checkpointPath = self._getModelCheckpointFilePath(checkpointDir)
# Clean up old saved state, if any
if os.path.exists(checkpointDir):
if not os.path.isdir(checkpointDir):
raise Exception(("Existing filesystem entry <%s> is not a model"
" checkpoint -- refusing to delete (not a directory)") \
% checkpointDir)
if not os.path.isfile(checkpointPath):
raise Exception(("Existing filesystem entry <%s> is not a model"
" checkpoint -- refusing to delete"\
" (%s missing or not a file)") % \
(checkpointDir, checkpointPath))
shutil.rmtree(checkpointDir)
# Create a new directory for saving state
self.__makeDirectoryFromAbsolutePath(checkpointDir)
with open(checkpointPath, 'wb') as f:
proto.write(f) | Serializes model using capnproto and writes data to ``checkpointDir`` | Below is the instruction that describes the task:
### Input:
Serializes model using capnproto and writes data to ``checkpointDir``
### Response:
def writeToCheckpoint(self, checkpointDir):
    """Serializes model using capnproto and writes data to ``checkpointDir``"""
    proto = self.getSchema().new_message()
    self.write(proto)
    checkpointPath = self._getModelCheckpointFilePath(checkpointDir)
    # Clean up old saved state, if any; refuse to delete anything that
    # does not look like one of our own checkpoints.
    if os.path.exists(checkpointDir):
        if not os.path.isdir(checkpointDir):
            raise Exception(("Existing filesystem entry <%s> is not a model"
                             " checkpoint -- refusing to delete (not a directory)") \
                            % checkpointDir)
        if not os.path.isfile(checkpointPath):
            raise Exception(("Existing filesystem entry <%s> is not a model"
                             " checkpoint -- refusing to delete"\
                             " (%s missing or not a file)") % \
                            (checkpointDir, checkpointPath))
        shutil.rmtree(checkpointDir)
    # Create a new directory for saving state
    self.__makeDirectoryFromAbsolutePath(checkpointDir)
    with open(checkpointPath, 'wb') as f:
        proto.write(f) |
def send(self, stanza):
"""Send a stanza somwhere.
The default implementation sends it via the `uplink` if it is defined
or raises the `NoRouteError`.
:Parameters:
- `stanza`: the stanza to send.
:Types:
- `stanza`: `pyxmpp.stanza.Stanza`"""
if self.uplink:
self.uplink.send(stanza)
else:
raise NoRouteError("No route for stanza") | Send a stanza somewhere.
The default implementation sends it via the `uplink` if it is defined
or raises the `NoRouteError`.
:Parameters:
- `stanza`: the stanza to send.
:Types:
- `stanza`: `pyxmpp.stanza.Stanza` | Below is the instruction that describes the task:
### Input:
Send a stanza somewhere.
The default implementation sends it via the `uplink` if it is defined
or raises the `NoRouteError`.
:Parameters:
- `stanza`: the stanza to send.
:Types:
- `stanza`: `pyxmpp.stanza.Stanza`
### Response:
def send(self, stanza):
    """Send a stanza somewhere.
    The default implementation sends it via the `uplink` if it is defined
    or raises the `NoRouteError`.
    :Parameters:
        - `stanza`: the stanza to send.
    :Types:
        - `stanza`: `pyxmpp.stanza.Stanza`"""
    # Delegate to the uplink when one is configured; otherwise there is
    # nowhere to route the stanza.
    if self.uplink:
        self.uplink.send(stanza)
    else:
        raise NoRouteError("No route for stanza") |
def flat_plane(script, plane=0, aspect_ratio=False):
"""Flat plane parameterization
"""
filter_xml = ''.join([
' <filter name="Parametrization: Flat Plane ">\n',
' <Param name="projectionPlane"',
'value="%d"' % plane,
'description="Projection plane"',
'enum_val0="XY"',
'enum_val1="XZ"',
'enum_val2="YZ"',
'enum_cardinality="3"',
'type="RichEnum"',
'tooltip="Choose the projection plane"',
'/>\n',
' <Param name="aspectRatio"',
'value="%s"' % str(aspect_ratio).lower(),
'description="Preserve Ratio"',
'type="RichBool"',
'tooltip="If checked the resulting parametrization will preserve the original apsect ratio of the model otherwise it will fill up the whole 0..1 uv space"',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | Flat plane parameterization | Below is the instruction that describes the task:
### Input:
Flat plane parameterization
### Response:
def flat_plane(script, plane=0, aspect_ratio=False):
    """Flat plane parameterization
    """
    # Build the MeshLab filter XML by hand; `plane` selects the projection
    # plane enum (0=XY, 1=XZ, 2=YZ) and `aspect_ratio` maps onto the
    # "Preserve Ratio" boolean parameter.
    filter_xml = ''.join([
        '  <filter name="Parametrization: Flat Plane ">\n',
        '    <Param name="projectionPlane"',
        'value="%d"' % plane,
        'description="Projection plane"',
        'enum_val0="XY"',
        'enum_val1="XZ"',
        'enum_val2="YZ"',
        'enum_cardinality="3"',
        'type="RichEnum"',
        'tooltip="Choose the projection plane"',
        '/>\n',
        '    <Param name="aspectRatio"',
        'value="%s"' % str(aspect_ratio).lower(),
        'description="Preserve Ratio"',
        'type="RichBool"',
        'tooltip="If checked the resulting parametrization will preserve the original apsect ratio of the model otherwise it will fill up the whole 0..1 uv space"',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None |
def deploy_stack(awsclient, context, conf, cloudformation, override_stack_policy=False):
"""Deploy the stack to AWS cloud. Does either create or update the stack.
:param conf:
:param override_stack_policy:
:return: exit_code
"""
stack_name = _get_stack_name(conf)
parameters = _generate_parameters(conf)
if stack_exists(awsclient, stack_name):
exit_code = _update_stack(awsclient, context, conf, cloudformation,
parameters, override_stack_policy)
else:
exit_code = _create_stack(awsclient, context, conf, cloudformation,
parameters)
# add 'stack_output' to the context so it becomes available
# in 'command_finalized' hook
context['stack_output'] = _get_stack_outputs(
awsclient.get_client('cloudformation'), stack_name)
_call_hook(awsclient, conf, stack_name, parameters, cloudformation,
hook='post_hook',
message='CloudFormation is done, now executing post hook...')
return exit_code | Deploy the stack to AWS cloud. Does either create or update the stack.
:param conf:
:param override_stack_policy:
:return: exit_code | Below is the instruction that describes the task:
### Input:
Deploy the stack to AWS cloud. Does either create or update the stack.
:param conf:
:param override_stack_policy:
:return: exit_code
### Response:
def deploy_stack(awsclient, context, conf, cloudformation, override_stack_policy=False):
    """Deploy the stack to AWS cloud. Does either create or update the stack.
    :param conf:
    :param override_stack_policy:
    :return: exit_code
    """
    stack_name = _get_stack_name(conf)
    parameters = _generate_parameters(conf)
    # Update in place when the stack already exists, otherwise create it.
    if stack_exists(awsclient, stack_name):
        exit_code = _update_stack(awsclient, context, conf, cloudformation,
                                  parameters, override_stack_policy)
    else:
        exit_code = _create_stack(awsclient, context, conf, cloudformation,
                                  parameters)
    # add 'stack_output' to the context so it becomes available
    # in 'command_finalized' hook
    context['stack_output'] = _get_stack_outputs(
        awsclient.get_client('cloudformation'), stack_name)
    _call_hook(awsclient, conf, stack_name, parameters, cloudformation,
               hook='post_hook',
               message='CloudFormation is done, now executing post hook...')
    return exit_code |
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result | Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
                 False. | Below is the instruction that describes the task:
### Input:
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
### Response:
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result |
def iter_genotypes(self):
"""Iterates on available markers.
Returns:
Genotypes instances.
"""
# Parsing each column of the dataframe
for variant in self.df.columns:
genotypes = self.df.loc[:, variant].values
info = self.map_info.loc[variant, :]
yield Genotypes(
Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]),
genotypes,
reference=info.a2,
coded=info.a1,
multiallelic=False,
) | Iterates on available markers.
Returns:
        Genotypes instances. | Below is the instruction that describes the task:
### Input:
Iterates on available markers.
Returns:
Genotypes instances.
### Response:
def iter_genotypes(self):
"""Iterates on available markers.
Returns:
Genotypes instances.
"""
# Parsing each column of the dataframe
for variant in self.df.columns:
genotypes = self.df.loc[:, variant].values
info = self.map_info.loc[variant, :]
yield Genotypes(
Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]),
genotypes,
reference=info.a2,
coded=info.a1,
multiallelic=False,
) |
def checkedItems( self ):
"""
Returns the checked items for this combobox.
:return [<str>, ..]
"""
if not self.isCheckable():
return []
return [nativestring(self.itemText(i)) for i in self.checkedIndexes()] | Returns the checked items for this combobox.
    :return [<str>, ..] | Below is the instruction that describes the task:
### Input:
Returns the checked items for this combobox.
:return [<str>, ..]
### Response:
def checkedItems( self ):
"""
Returns the checked items for this combobox.
:return [<str>, ..]
"""
if not self.isCheckable():
return []
return [nativestring(self.itemText(i)) for i in self.checkedIndexes()] |
def upload_from_shared_memory(self, location, bbox, order='F', cutout_bbox=None):
"""
Upload from a shared memory array.
https://github.com/seung-lab/cloud-volume/wiki/Advanced-Topic:-Shared-Memory
tip: If you want to use slice notation, np.s_[...] will help in a pinch.
MEMORY LIFECYCLE WARNING: You are responsible for managing the lifecycle of the
shared memory. CloudVolume will merely read from it, it will not unlink the
memory automatically. To fully clear the shared memory you must unlink the
location and close any mmap file handles. You can use `cloudvolume.sharedmemory.unlink(...)`
to help you unlink the shared memory file.
EXPERT MODE WARNING: If you aren't sure you need this function (e.g. to relieve
memory pressure or improve performance in some way) you should use the ordinary
upload method of vol[:] = img. A typical use case is transferring arrays between
different processes without making copies. For reference, this feature was created
for uploading a 62 GB array that originated in Julia.
Required:
location: (str) Shared memory location e.g. 'cloudvolume-shm-RANDOM-STRING'
This typically corresponds to a file in `/dev/shm` or `/run/shm/`. It can
also be a file if you're using that for mmap.
bbox: (Bbox or list of slices) the bounding box the shared array represents. For instance
if you have a 1024x1024x128 volume and you're uploading only a 512x512x64 corner
touching the origin, your Bbox would be `Bbox( (0,0,0), (512,512,64) )`.
Optional:
cutout_bbox: (bbox or list of slices) If you only want to upload a section of the
array, give the bbox in volume coordinates (not image coordinates) that should
be cut out. For example, if you only want to upload 256x256x32 of the upper
rightmost corner of the above example but the entire 512x512x64 array is stored
in memory, you would provide: `Bbox( (256, 256, 32), (512, 512, 64) )`
By default, just upload the entire image.
Returns: void
"""
def tobbox(x):
if type(x) == Bbox:
return x
return Bbox.from_slices(x)
bbox = tobbox(bbox)
cutout_bbox = tobbox(cutout_bbox) if cutout_bbox else bbox.clone()
if not bbox.contains_bbox(cutout_bbox):
raise exceptions.AlignmentError("""
The provided cutout is not wholly contained in the given array.
Bbox: {}
Cutout: {}
""".format(bbox, cutout_bbox))
if self.autocrop:
cutout_bbox = Bbox.intersection(cutout_bbox, self.bounds)
if cutout_bbox.subvoxel():
return
shape = list(bbox.size3()) + [ self.num_channels ]
mmap_handle, shared_image = sharedmemory.ndarray(
location=location, shape=shape, dtype=self.dtype, order=order, readonly=True)
delta_box = cutout_bbox.clone() - bbox.minpt
cutout_image = shared_image[ delta_box.to_slices() ]
txrx.upload_image(self, cutout_image, cutout_bbox.minpt, parallel=self.parallel,
manual_shared_memory_id=location, manual_shared_memory_bbox=bbox, manual_shared_memory_order=order)
mmap_handle.close() | Upload from a shared memory array.
https://github.com/seung-lab/cloud-volume/wiki/Advanced-Topic:-Shared-Memory
tip: If you want to use slice notation, np.s_[...] will help in a pinch.
MEMORY LIFECYCLE WARNING: You are responsible for managing the lifecycle of the
shared memory. CloudVolume will merely read from it, it will not unlink the
memory automatically. To fully clear the shared memory you must unlink the
location and close any mmap file handles. You can use `cloudvolume.sharedmemory.unlink(...)`
to help you unlink the shared memory file.
EXPERT MODE WARNING: If you aren't sure you need this function (e.g. to relieve
memory pressure or improve performance in some way) you should use the ordinary
upload method of vol[:] = img. A typical use case is transferring arrays between
different processes without making copies. For reference, this feature was created
for uploading a 62 GB array that originated in Julia.
Required:
location: (str) Shared memory location e.g. 'cloudvolume-shm-RANDOM-STRING'
This typically corresponds to a file in `/dev/shm` or `/run/shm/`. It can
also be a file if you're using that for mmap.
bbox: (Bbox or list of slices) the bounding box the shared array represents. For instance
if you have a 1024x1024x128 volume and you're uploading only a 512x512x64 corner
touching the origin, your Bbox would be `Bbox( (0,0,0), (512,512,64) )`.
Optional:
cutout_bbox: (bbox or list of slices) If you only want to upload a section of the
array, give the bbox in volume coordinates (not image coordinates) that should
be cut out. For example, if you only want to upload 256x256x32 of the upper
rightmost corner of the above example but the entire 512x512x64 array is stored
in memory, you would provide: `Bbox( (256, 256, 32), (512, 512, 64) )`
By default, just upload the entire image.
    Returns: void | Below is the instruction that describes the task:
### Input:
Upload from a shared memory array.
https://github.com/seung-lab/cloud-volume/wiki/Advanced-Topic:-Shared-Memory
tip: If you want to use slice notation, np.s_[...] will help in a pinch.
MEMORY LIFECYCLE WARNING: You are responsible for managing the lifecycle of the
shared memory. CloudVolume will merely read from it, it will not unlink the
memory automatically. To fully clear the shared memory you must unlink the
location and close any mmap file handles. You can use `cloudvolume.sharedmemory.unlink(...)`
to help you unlink the shared memory file.
EXPERT MODE WARNING: If you aren't sure you need this function (e.g. to relieve
memory pressure or improve performance in some way) you should use the ordinary
upload method of vol[:] = img. A typical use case is transferring arrays between
different processes without making copies. For reference, this feature was created
for uploading a 62 GB array that originated in Julia.
Required:
location: (str) Shared memory location e.g. 'cloudvolume-shm-RANDOM-STRING'
This typically corresponds to a file in `/dev/shm` or `/run/shm/`. It can
also be a file if you're using that for mmap.
bbox: (Bbox or list of slices) the bounding box the shared array represents. For instance
if you have a 1024x1024x128 volume and you're uploading only a 512x512x64 corner
touching the origin, your Bbox would be `Bbox( (0,0,0), (512,512,64) )`.
Optional:
cutout_bbox: (bbox or list of slices) If you only want to upload a section of the
array, give the bbox in volume coordinates (not image coordinates) that should
be cut out. For example, if you only want to upload 256x256x32 of the upper
rightmost corner of the above example but the entire 512x512x64 array is stored
in memory, you would provide: `Bbox( (256, 256, 32), (512, 512, 64) )`
By default, just upload the entire image.
Returns: void
### Response:
def upload_from_shared_memory(self, location, bbox, order='F', cutout_bbox=None):
"""
Upload from a shared memory array.
https://github.com/seung-lab/cloud-volume/wiki/Advanced-Topic:-Shared-Memory
tip: If you want to use slice notation, np.s_[...] will help in a pinch.
MEMORY LIFECYCLE WARNING: You are responsible for managing the lifecycle of the
shared memory. CloudVolume will merely read from it, it will not unlink the
memory automatically. To fully clear the shared memory you must unlink the
location and close any mmap file handles. You can use `cloudvolume.sharedmemory.unlink(...)`
to help you unlink the shared memory file.
EXPERT MODE WARNING: If you aren't sure you need this function (e.g. to relieve
memory pressure or improve performance in some way) you should use the ordinary
upload method of vol[:] = img. A typical use case is transferring arrays between
different processes without making copies. For reference, this feature was created
for uploading a 62 GB array that originated in Julia.
Required:
location: (str) Shared memory location e.g. 'cloudvolume-shm-RANDOM-STRING'
This typically corresponds to a file in `/dev/shm` or `/run/shm/`. It can
also be a file if you're using that for mmap.
bbox: (Bbox or list of slices) the bounding box the shared array represents. For instance
if you have a 1024x1024x128 volume and you're uploading only a 512x512x64 corner
touching the origin, your Bbox would be `Bbox( (0,0,0), (512,512,64) )`.
Optional:
cutout_bbox: (bbox or list of slices) If you only want to upload a section of the
array, give the bbox in volume coordinates (not image coordinates) that should
be cut out. For example, if you only want to upload 256x256x32 of the upper
rightmost corner of the above example but the entire 512x512x64 array is stored
in memory, you would provide: `Bbox( (256, 256, 32), (512, 512, 64) )`
By default, just upload the entire image.
Returns: void
"""
def tobbox(x):
if type(x) == Bbox:
return x
return Bbox.from_slices(x)
bbox = tobbox(bbox)
cutout_bbox = tobbox(cutout_bbox) if cutout_bbox else bbox.clone()
if not bbox.contains_bbox(cutout_bbox):
raise exceptions.AlignmentError("""
The provided cutout is not wholly contained in the given array.
Bbox: {}
Cutout: {}
""".format(bbox, cutout_bbox))
if self.autocrop:
cutout_bbox = Bbox.intersection(cutout_bbox, self.bounds)
if cutout_bbox.subvoxel():
return
shape = list(bbox.size3()) + [ self.num_channels ]
mmap_handle, shared_image = sharedmemory.ndarray(
location=location, shape=shape, dtype=self.dtype, order=order, readonly=True)
delta_box = cutout_bbox.clone() - bbox.minpt
cutout_image = shared_image[ delta_box.to_slices() ]
txrx.upload_image(self, cutout_image, cutout_bbox.minpt, parallel=self.parallel,
manual_shared_memory_id=location, manual_shared_memory_bbox=bbox, manual_shared_memory_order=order)
mmap_handle.close() |
def create_queue(self, queue_name, visibility_timeout=None, callback=None):
"""
Create an SQS Queue.
:type queue_name: str or unicode
:param queue_name: The name of the new queue. Names are scoped to
an account and need to be unique within that
account. Calling this method on an existing
queue name will not return an error from SQS
unless the value for visibility_timeout is
different than the value of the existing queue
of that name. This is still an expensive operation,
though, and not the preferred way to check for
the existence of a queue. See the
:func:`boto.sqs.connection.SQSConnection.lookup` method.
:type visibility_timeout: int
:param visibility_timeout: The default visibility timeout for all
messages written in the queue. This can
be overridden on a per-message.
:rtype: :class:`boto.sqs.queue.Queue`
:return: The newly created queue.
"""
params = {'QueueName': queue_name}
if visibility_timeout:
params['DefaultVisibilityTimeout'] = '%d' % (visibility_timeout,)
return self.get_object('CreateQueue', params, botornado.sqs.queue.AsyncQueue, callback=callback) | Create an SQS Queue.
:type queue_name: str or unicode
:param queue_name: The name of the new queue. Names are scoped to
an account and need to be unique within that
account. Calling this method on an existing
queue name will not return an error from SQS
unless the value for visibility_timeout is
different than the value of the existing queue
of that name. This is still an expensive operation,
though, and not the preferred way to check for
the existence of a queue. See the
:func:`boto.sqs.connection.SQSConnection.lookup` method.
:type visibility_timeout: int
:param visibility_timeout: The default visibility timeout for all
messages written in the queue. This can
be overridden on a per-message.
:rtype: :class:`boto.sqs.queue.Queue`
        :return: The newly created queue. | Below is the instruction that describes the task:
### Input:
Create an SQS Queue.
:type queue_name: str or unicode
:param queue_name: The name of the new queue. Names are scoped to
an account and need to be unique within that
account. Calling this method on an existing
queue name will not return an error from SQS
unless the value for visibility_timeout is
different than the value of the existing queue
of that name. This is still an expensive operation,
though, and not the preferred way to check for
the existence of a queue. See the
:func:`boto.sqs.connection.SQSConnection.lookup` method.
:type visibility_timeout: int
:param visibility_timeout: The default visibility timeout for all
messages written in the queue. This can
be overridden on a per-message.
:rtype: :class:`boto.sqs.queue.Queue`
:return: The newly created queue.
### Response:
def create_queue(self, queue_name, visibility_timeout=None, callback=None):
"""
Create an SQS Queue.
:type queue_name: str or unicode
:param queue_name: The name of the new queue. Names are scoped to
an account and need to be unique within that
account. Calling this method on an existing
queue name will not return an error from SQS
unless the value for visibility_timeout is
different than the value of the existing queue
of that name. This is still an expensive operation,
though, and not the preferred way to check for
the existence of a queue. See the
:func:`boto.sqs.connection.SQSConnection.lookup` method.
:type visibility_timeout: int
:param visibility_timeout: The default visibility timeout for all
messages written in the queue. This can
be overridden on a per-message.
:rtype: :class:`boto.sqs.queue.Queue`
:return: The newly created queue.
"""
params = {'QueueName': queue_name}
if visibility_timeout:
params['DefaultVisibilityTimeout'] = '%d' % (visibility_timeout,)
return self.get_object('CreateQueue', params, botornado.sqs.queue.AsyncQueue, callback=callback) |
def prt_mrks(self, name_marks_list, prt=sys.stdout):
"""Print summary of all GOEAs.
Example:
Key for GO sections:
A immune
B viral/bacteria
C neuro
D cell death
E lipid
F adhesion
G cell cycle
H chromosome
I development
J extracellular matrix
K ion
L localization
M membrane
N metabolic
O phosphorylation
P signaling
Q stimulus
R prolif_differ
S Misc.
ABCDEFGHIJKLMNOPQRS
XX.X..XXX..X.XX.XXX transient_increase
XX.XXX.....X.X.XXXX consistent_increase
XXXXXX..XXXXXXXXXXX late_increase
..X.....X.XX.X....X consistent_decrease
..X.XX..X.XX.XXX.XX late_decrease
"""
if not name_marks_list:
return
# prt.write("\nKey for GO sections:\n")
# self.prt_section_key(prt)
prt.write("\n{HDR}\n".format(HDR=self.str_hdr()))
for name, mark in name_marks_list:
if mark is not None:
prt.write("{MRKS} {NAME}\n".format(MRKS="".join(mark), NAME=name))
prt.write("\n") | Print summary of all GOEAs.
Example:
Key for GO sections:
A immune
B viral/bacteria
C neuro
D cell death
E lipid
F adhesion
G cell cycle
H chromosome
I development
J extracellular matrix
K ion
L localization
M membrane
N metabolic
O phosphorylation
P signaling
Q stimulus
R prolif_differ
S Misc.
ABCDEFGHIJKLMNOPQRS
XX.X..XXX..X.XX.XXX transient_increase
XX.XXX.....X.X.XXXX consistent_increase
XXXXXX..XXXXXXXXXXX late_increase
..X.....X.XX.X....X consistent_decrease
..X.XX..X.XX.XXX.XX late_decrease | Below is the the instruction that describes the task:
### Input:
Print summary of all GOEAs.
Example:
Key for GO sections:
A immune
B viral/bacteria
C neuro
D cell death
E lipid
F adhesion
G cell cycle
H chromosome
I development
J extracellular matrix
K ion
L localization
M membrane
N metabolic
O phosphorylation
P signaling
Q stimulus
R prolif_differ
S Misc.
ABCDEFGHIJKLMNOPQRS
XX.X..XXX..X.XX.XXX transient_increase
XX.XXX.....X.X.XXXX consistent_increase
XXXXXX..XXXXXXXXXXX late_increase
..X.....X.XX.X....X consistent_decrease
..X.XX..X.XX.XXX.XX late_decrease
### Response:
def prt_mrks(self, name_marks_list, prt=sys.stdout):
"""Print summary of all GOEAs.
Example:
Key for GO sections:
A immune
B viral/bacteria
C neuro
D cell death
E lipid
F adhesion
G cell cycle
H chromosome
I development
J extracellular matrix
K ion
L localization
M membrane
N metabolic
O phosphorylation
P signaling
Q stimulus
R prolif_differ
S Misc.
ABCDEFGHIJKLMNOPQRS
XX.X..XXX..X.XX.XXX transient_increase
XX.XXX.....X.X.XXXX consistent_increase
XXXXXX..XXXXXXXXXXX late_increase
..X.....X.XX.X....X consistent_decrease
..X.XX..X.XX.XXX.XX late_decrease
"""
if not name_marks_list:
return
# prt.write("\nKey for GO sections:\n")
# self.prt_section_key(prt)
prt.write("\n{HDR}\n".format(HDR=self.str_hdr()))
for name, mark in name_marks_list:
if mark is not None:
prt.write("{MRKS} {NAME}\n".format(MRKS="".join(mark), NAME=name))
prt.write("\n") |
def _sequence(self, i):
"""Handle character group."""
c = next(i)
if c == '!':
c = next(i)
if c in ('^', '-', '['):
c = next(i)
while c != ']':
if c == '\\':
# Handle escapes
subindex = i.index
try:
self._references(i, True)
except PathNameException:
raise StopIteration
except StopIteration:
i.rewind(i.index - subindex)
elif c == '/':
raise StopIteration
c = next(i) | Handle character group. | Below is the the instruction that describes the task:
### Input:
Handle character group.
### Response:
def _sequence(self, i):
"""Handle character group."""
c = next(i)
if c == '!':
c = next(i)
if c in ('^', '-', '['):
c = next(i)
while c != ']':
if c == '\\':
# Handle escapes
subindex = i.index
try:
self._references(i, True)
except PathNameException:
raise StopIteration
except StopIteration:
i.rewind(i.index - subindex)
elif c == '/':
raise StopIteration
c = next(i) |
def start(self):
'''start sending packets'''
if self.sock is not None:
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.connect(('', gen_settings.port))
global track_count
self.aircraft = []
track_count = 0
self.last_t = 0
# some fixed wing aircraft
for i in range(gen_settings.num_aircraft):
self.aircraft.append(Aircraft(random.uniform(10, 100), 2000.0))
# some birds of prey
for i in range(gen_settings.num_bird_prey):
self.aircraft.append(BirdOfPrey())
# some migrating birds
for i in range(gen_settings.num_bird_migratory):
self.aircraft.append(BirdMigrating())
# some weather systems
for i in range(gen_settings.num_weather):
self.aircraft.append(Weather())
        print("Started on port %u" % gen_settings.port) | start sending packets | Below is the instruction that describes the task:
### Input:
start sending packets
### Response:
def start(self):
'''start sending packets'''
if self.sock is not None:
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.connect(('', gen_settings.port))
global track_count
self.aircraft = []
track_count = 0
self.last_t = 0
# some fixed wing aircraft
for i in range(gen_settings.num_aircraft):
self.aircraft.append(Aircraft(random.uniform(10, 100), 2000.0))
# some birds of prey
for i in range(gen_settings.num_bird_prey):
self.aircraft.append(BirdOfPrey())
# some migrating birds
for i in range(gen_settings.num_bird_migratory):
self.aircraft.append(BirdMigrating())
# some weather systems
for i in range(gen_settings.num_weather):
self.aircraft.append(Weather())
print("Started on port %u" % gen_settings.port) |
def on_select_mean_type_box(self, event):
"""
set parent Zeq_GUI to reflect change in this box and change the
@param: event -> the wx.ComboBoxEvent that triggered this function
"""
new_mean_type = self.mean_type_box.GetValue()
if new_mean_type == "None":
self.parent.clear_high_level_pars()
self.parent.mean_type_box.SetStringSelection(new_mean_type)
self.parent.onSelect_mean_type_box(event) | set parent Zeq_GUI to reflect change in this box and change the
        @param: event -> the wx.ComboBoxEvent that triggered this function | Below is the instruction that describes the task:
### Input:
set parent Zeq_GUI to reflect change in this box and change the
@param: event -> the wx.ComboBoxEvent that triggered this function
### Response:
def on_select_mean_type_box(self, event):
"""
set parent Zeq_GUI to reflect change in this box and change the
@param: event -> the wx.ComboBoxEvent that triggered this function
"""
new_mean_type = self.mean_type_box.GetValue()
if new_mean_type == "None":
self.parent.clear_high_level_pars()
self.parent.mean_type_box.SetStringSelection(new_mean_type)
self.parent.onSelect_mean_type_box(event) |
def convert_belscript(ctx, input_fn, output_fn):
"""Convert belscript to nanopubs_bel format
This will convert the OpenBEL BELScript file format to
nanopub_bel-1.0.0 format.
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
"""
try:
(
out_fh,
yaml_flag,
jsonl_flag,
json_flag,
) = bel.nanopub.files.create_nanopubs_fh(output_fn)
if yaml_flag or json_flag:
docs = []
# input file
if re.search("gz$", input_fn):
f = gzip.open(input_fn, "rt")
else:
f = open(input_fn, "rt")
# process belscript
for doc in bel.nanopub.belscripts.parse_belscript(f):
if yaml_flag or json_flag:
docs.append(doc)
elif jsonl_flag:
out_fh.write("{}\n".format(json.dumps(doc)))
if yaml_flag:
yaml.dump(docs, out_fh)
elif json_flag:
json.dump(docs, out_fh, indent=4)
finally:
f.close()
out_fh.close() | Convert belscript to nanopubs_bel format
This will convert the OpenBEL BELScript file format to
nanopub_bel-1.0.0 format.
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
        If output fn has *.yaml* or *.yml*, will be written as a YAML file | Below is the instruction that describes the task:
### Input:
Convert belscript to nanopubs_bel format
This will convert the OpenBEL BELScript file format to
nanopub_bel-1.0.0 format.
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
### Response:
def convert_belscript(ctx, input_fn, output_fn):
"""Convert belscript to nanopubs_bel format
This will convert the OpenBEL BELScript file format to
nanopub_bel-1.0.0 format.
\b
input_fn:
If input fn has *.gz, will read as a gzip file
\b
output_fn:
If output fn has *.gz, will written as a gzip file
If output fn has *.jsonl*, will written as a JSONLines file
IF output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
"""
try:
(
out_fh,
yaml_flag,
jsonl_flag,
json_flag,
) = bel.nanopub.files.create_nanopubs_fh(output_fn)
if yaml_flag or json_flag:
docs = []
# input file
if re.search("gz$", input_fn):
f = gzip.open(input_fn, "rt")
else:
f = open(input_fn, "rt")
# process belscript
for doc in bel.nanopub.belscripts.parse_belscript(f):
if yaml_flag or json_flag:
docs.append(doc)
elif jsonl_flag:
out_fh.write("{}\n".format(json.dumps(doc)))
if yaml_flag:
yaml.dump(docs, out_fh)
elif json_flag:
json.dump(docs, out_fh, indent=4)
finally:
f.close()
out_fh.close() |
def _paged_api_call(self, func, kwargs, item_type='photo'):
"""
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
"""
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break | Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
        Flickr seems to limit to about 500 items. | Below is the instruction that describes the task:
### Input:
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
### Response:
def _paged_api_call(self, func, kwargs, item_type='photo'):
"""
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
"""
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break |
def pdna_network_from_bbox(
lat_min=None, lng_min=None, lat_max=None, lng_max=None, bbox=None,
network_type='walk', two_way=True,
timeout=180, memory=None, max_query_area_size=50 * 1000 * 50 * 1000):
"""
Make a Pandana network from a bounding lat/lon box
request to the Overpass API. Distance will be in the default units meters.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
bbox : tuple
Bounding box formatted as a 4 element tuple:
(lng_max, lat_min, lng_min, lat_max)
network_type : {'walk', 'drive'}, optional
Specify whether the network will be used for walking or driving.
A value of 'walk' attempts to exclude things like freeways,
while a value of 'drive' attempts to exclude things like
bike and walking paths.
two_way : bool, optional
Whether the routes are two-way. If True, node pairs will only
occur once.
timeout : int, optional
the timeout interval for requests and to pass to Overpass API
memory : int, optional
server memory allocation size for the query, in bytes.
If none, server will use its default allocation size
max_query_area_size : float, optional
max area for any part of the geometry, in the units the geometry is in
Returns
-------
network : pandana.Network
"""
nodes, edges = network_from_bbox(lat_min=lat_min, lng_min=lng_min,
lat_max=lat_max, lng_max=lng_max,
bbox=bbox, network_type=network_type,
two_way=two_way, timeout=timeout,
memory=memory,
max_query_area_size=max_query_area_size)
return Network(
nodes['x'], nodes['y'],
edges['from'], edges['to'], edges[['distance']]) | Make a Pandana network from a bounding lat/lon box
request to the Overpass API. Distance will be in the default units meters.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
bbox : tuple
Bounding box formatted as a 4 element tuple:
(lng_max, lat_min, lng_min, lat_max)
network_type : {'walk', 'drive'}, optional
Specify whether the network will be used for walking or driving.
A value of 'walk' attempts to exclude things like freeways,
while a value of 'drive' attempts to exclude things like
bike and walking paths.
two_way : bool, optional
Whether the routes are two-way. If True, node pairs will only
occur once.
timeout : int, optional
the timeout interval for requests and to pass to Overpass API
memory : int, optional
server memory allocation size for the query, in bytes.
If none, server will use its default allocation size
max_query_area_size : float, optional
max area for any part of the geometry, in the units the geometry is in
Returns
-------
    network : pandana.Network | Below is the instruction that describes the task:
### Input:
Make a Pandana network from a bounding lat/lon box
request to the Overpass API. Distance will be in the default units meters.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
bbox : tuple
Bounding box formatted as a 4 element tuple:
(lng_max, lat_min, lng_min, lat_max)
network_type : {'walk', 'drive'}, optional
Specify whether the network will be used for walking or driving.
A value of 'walk' attempts to exclude things like freeways,
while a value of 'drive' attempts to exclude things like
bike and walking paths.
two_way : bool, optional
Whether the routes are two-way. If True, node pairs will only
occur once.
timeout : int, optional
the timeout interval for requests and to pass to Overpass API
memory : int, optional
server memory allocation size for the query, in bytes.
If none, server will use its default allocation size
max_query_area_size : float, optional
max area for any part of the geometry, in the units the geometry is in
Returns
-------
network : pandana.Network
### Response:
def pdna_network_from_bbox(
        lat_min=None, lng_min=None, lat_max=None, lng_max=None, bbox=None,
        network_type='walk', two_way=True,
        timeout=180, memory=None, max_query_area_size=50 * 1000 * 50 * 1000):
    """Build a Pandana network from a lat/lon bounding box.

    Queries the Overpass API for the box and wraps the result in a
    ``pandana.Network``; distances are in the default units, meters.

    Parameters
    ----------
    lat_min, lng_min, lat_max, lng_max : float
    bbox : tuple
        Bounding box as a 4-element tuple:
        (lng_max, lat_min, lng_min, lat_max)
    network_type : {'walk', 'drive'}, optional
        'walk' attempts to exclude things like freeways; 'drive'
        attempts to exclude things like bike and walking paths.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        Timeout interval for requests, also passed to the Overpass API.
    memory : int, optional
        Server memory allocation size for the query, in bytes. If None,
        the server uses its default allocation size.
    max_query_area_size : float, optional
        Max area for any part of the geometry, in the units the
        geometry is in.

    Returns
    -------
    network : pandana.Network
    """
    query_args = dict(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        bbox=bbox, network_type=network_type, two_way=two_way,
        timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size)
    nodes, edges = network_from_bbox(**query_args)
    return Network(nodes['x'], nodes['y'],
                   edges['from'], edges['to'], edges[['distance']])
def parse(cls, root):
"""
Create a new AMDSec by parsing root.
:param root: Element or ElementTree to be parsed into an object.
"""
if root.tag != utils.lxmlns("mets") + "amdSec":
raise exceptions.ParseError(
"AMDSec can only parse amdSec elements with METS namespace."
)
section_id = root.get("ID")
subsections = []
for child in root:
subsection = SubSection.parse(child)
subsections.append(subsection)
return cls(section_id, subsections) | Create a new AMDSec by parsing root.
:param root: Element or ElementTree to be parsed into an object. | Below is the the instruction that describes the task:
### Input:
Create a new AMDSec by parsing root.
:param root: Element or ElementTree to be parsed into an object.
### Response:
def parse(cls, root):
    """Build an AMDSec by parsing an ``amdSec`` element.

    :param root: Element or ElementTree to be parsed into an object.
    :raises exceptions.ParseError: if ``root`` is not a METS-namespaced
        ``amdSec`` element.
    """
    expected_tag = utils.lxmlns("mets") + "amdSec"
    if root.tag != expected_tag:
        raise exceptions.ParseError(
            "AMDSec can only parse amdSec elements with METS namespace."
        )
    # Every child of an amdSec is a subsection (techMD, rightsMD, ...).
    subsections = [SubSection.parse(child) for child in root]
    return cls(root.get("ID"), subsections)
def resample(data, s_freq=None, axis='time', ftype='fir', n=None):
"""Downsample the data after applying a filter.
Parameters
----------
data : instance of Data
data to downsample
s_freq : int or float
desired sampling frequency
axis : str
axis you want to apply downsample on (most likely 'time')
ftype : str
filter type to apply. The default here is 'fir', like Matlab but unlike
the default in scipy, because it works better
n : int
The order of the filter (1 less than the length for ‘fir’).
Returns
-------
instance of Data
downsampled data
"""
output = data._copy()
ratio = int(data.s_freq / s_freq)
for i in range(data.number_of('trial')):
output.data[i] = decimate(data.data[i], ratio,
axis=data.index_of(axis),
zero_phase=True)
n_samples = output.data[i].shape[data.index_of(axis)]
output.axis[axis][i] = linspace(data.axis[axis][i][0],
data.axis[axis][i][-1] +
1 / data.s_freq,
n_samples)
output.s_freq = s_freq
return output | Downsample the data after applying a filter.
Parameters
----------
data : instance of Data
data to downsample
s_freq : int or float
desired sampling frequency
axis : str
axis you want to apply downsample on (most likely 'time')
ftype : str
filter type to apply. The default here is 'fir', like Matlab but unlike
the default in scipy, because it works better
n : int
The order of the filter (1 less than the length for ‘fir’).
Returns
-------
instance of Data
        downsampled data | Below is the instruction that describes the task:
### Input:
Downsample the data after applying a filter.
Parameters
----------
data : instance of Data
data to downsample
s_freq : int or float
desired sampling frequency
axis : str
axis you want to apply downsample on (most likely 'time')
ftype : str
filter type to apply. The default here is 'fir', like Matlab but unlike
the default in scipy, because it works better
n : int
The order of the filter (1 less than the length for ‘fir’).
Returns
-------
instance of Data
downsampled data
### Response:
def resample(data, s_freq=None, axis='time', ftype='fir', n=None):
    """Downsample the data after applying an anti-aliasing filter.

    Parameters
    ----------
    data : instance of Data
        data to downsample
    s_freq : int or float
        desired sampling frequency
    axis : str
        axis you want to apply downsample on (most likely 'time')
    ftype : str
        filter type to apply, 'fir' or 'iir'. The default here is 'fir',
        like Matlab but unlike the default in scipy, because it works
        better.
    n : int
        The order of the filter (1 less than the length for 'fir').
        If None, scipy picks a default order for the chosen filter type.

    Returns
    -------
    instance of Data
        downsampled data
    """
    output = data._copy()

    # decimate only supports integer factors, so the ratio is truncated.
    ratio = int(data.s_freq / s_freq)

    for i in range(data.number_of('trial')):
        # BUG FIX: ftype and n were previously accepted but never passed
        # on, so scipy's default IIR filter was always used despite the
        # documented 'fir' default.
        output.data[i] = decimate(data.data[i], ratio, n=n, ftype=ftype,
                                  axis=data.index_of(axis),
                                  zero_phase=True)

        # Rebuild the time (or other) axis to match the new sample count.
        n_samples = output.data[i].shape[data.index_of(axis)]
        output.axis[axis][i] = linspace(data.axis[axis][i][0],
                                        data.axis[axis][i][-1] +
                                        1 / data.s_freq,
                                        n_samples)

    output.s_freq = s_freq
    return output
def next_joystick_device():
"""Finds the next available js device name."""
for i in range(100):
dev = "/dev/input/js{0}".format(i)
if not os.path.exists(dev):
return dev | Finds the next available js device name. | Below is the the instruction that describes the task:
### Input:
Finds the next available js device name.
### Response:
def next_joystick_device():
    """Return the first unused /dev/input/jsN path (N in 0..99).

    Returns None if /dev/input/js0 through /dev/input/js99 all exist.
    """
    for index in range(100):
        candidate = "/dev/input/js{0}".format(index)
        if not os.path.exists(candidate):
            return candidate
    return None
def reqHistoricalNews(
self, conId: int, providerCodes: str,
startDateTime: Union[str, datetime.date],
endDateTime: Union[str, datetime.date],
totalResults: int,
historicalNewsOptions: List[TagValue] = None) -> HistoricalNews:
"""
Get historical news headline.
https://interactivebrokers.github.io/tws-api/news.html
This method is blocking.
Args:
conId: Search news articles for contract with this conId.
providerCodes: A '+'-separated list of provider codes, like
'BZ+FLY'.
startDateTime: The (exclusive) start of the date range.
Can be given as a datetime.date or datetime.datetime,
or it can be given as a string in 'yyyyMMdd HH:mm:ss' format.
If no timezone is given then the TWS login timezone is used.
endDateTime: The (inclusive) end of the date range.
Can be given as a datetime.date or datetime.datetime,
or it can be given as a string in 'yyyyMMdd HH:mm:ss' format.
If no timezone is given then the TWS login timezone is used.
totalResults: Maximum number of headlines to fetch (300 max).
historicalNewsOptions: Unknown.
"""
return self._run(
self.reqHistoricalNewsAsync(
conId, providerCodes, startDateTime, endDateTime,
totalResults, historicalNewsOptions)) | Get historical news headline.
https://interactivebrokers.github.io/tws-api/news.html
This method is blocking.
Args:
conId: Search news articles for contract with this conId.
providerCodes: A '+'-separated list of provider codes, like
'BZ+FLY'.
startDateTime: The (exclusive) start of the date range.
Can be given as a datetime.date or datetime.datetime,
or it can be given as a string in 'yyyyMMdd HH:mm:ss' format.
If no timezone is given then the TWS login timezone is used.
endDateTime: The (inclusive) end of the date range.
Can be given as a datetime.date or datetime.datetime,
or it can be given as a string in 'yyyyMMdd HH:mm:ss' format.
If no timezone is given then the TWS login timezone is used.
totalResults: Maximum number of headlines to fetch (300 max).
        historicalNewsOptions: Unknown. | Below is the instruction that describes the task:
### Input:
Get historical news headline.
https://interactivebrokers.github.io/tws-api/news.html
This method is blocking.
Args:
conId: Search news articles for contract with this conId.
providerCodes: A '+'-separated list of provider codes, like
'BZ+FLY'.
startDateTime: The (exclusive) start of the date range.
Can be given as a datetime.date or datetime.datetime,
or it can be given as a string in 'yyyyMMdd HH:mm:ss' format.
If no timezone is given then the TWS login timezone is used.
endDateTime: The (inclusive) end of the date range.
Can be given as a datetime.date or datetime.datetime,
or it can be given as a string in 'yyyyMMdd HH:mm:ss' format.
If no timezone is given then the TWS login timezone is used.
totalResults: Maximum number of headlines to fetch (300 max).
historicalNewsOptions: Unknown.
### Response:
def reqHistoricalNews(
        self, conId: int, providerCodes: str,
        startDateTime: Union[str, datetime.date],
        endDateTime: Union[str, datetime.date],
        totalResults: int,
        historicalNewsOptions: List[TagValue] = None) -> HistoricalNews:
    """
    Fetch historical news headlines, blocking until they arrive.

    https://interactivebrokers.github.io/tws-api/news.html

    Args:
        conId: Search news articles for the contract with this conId.
        providerCodes: A '+'-separated list of provider codes, like
            'BZ+FLY'.
        startDateTime: The (exclusive) start of the date range, as a
            datetime.date/datetime.datetime or a 'yyyyMMdd HH:mm:ss'
            string. If no timezone is given then the TWS login timezone
            is used.
        endDateTime: The (inclusive) end of the date range, accepting
            the same formats as ``startDateTime``.
        totalResults: Maximum number of headlines to fetch (300 max).
        historicalNewsOptions: Unknown.
    """
    # Delegate to the async variant and drive it to completion.
    coro = self.reqHistoricalNewsAsync(
        conId, providerCodes, startDateTime, endDateTime,
        totalResults, historicalNewsOptions)
    return self._run(coro)
def get_url_repair_patterns():
"""Initialise and return a list of precompiled regexp patterns that
are used to try to re-assemble URLs that have been broken during
a document's conversion to plain-text.
@return: (list) of compiled re regexp patterns used for finding
various broken URLs.
"""
file_types_list = [
ur'h\s*t\s*m', # htm
ur'h\s*t\s*m\s*l', # html
ur't\s*x\s*t' # txt
ur'p\s*h\s*p' # php
ur'a\s*s\s*p\s*' # asp
ur'j\s*s\s*p', # jsp
ur'p\s*y', # py (python)
ur'p\s*l', # pl (perl)
ur'x\s*m\s*l', # xml
ur'j\s*p\s*g', # jpg
ur'g\s*i\s*f' # gif
ur'm\s*o\s*v' # mov
ur's\s*w\s*f' # swf
ur'p\s*d\s*f' # pdf
ur'p\s*s' # ps
ur'd\s*o\s*c', # doc
ur't\s*e\s*x', # tex
ur's\s*h\s*t\s*m\s*l', # shtml
]
pattern_list = [
ur'(h\s*t\s*t\s*p\s*\:\s*\/\s*\/)',
ur'(f\s*t\s*p\s*\:\s*\/\s*\/\s*)',
ur'((http|ftp):\/\/\s*[\w\d])',
ur'((http|ftp):\/\/([\w\d\s\._\-])+?\s*\/)',
ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)+)',
ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)*([\w\d\_\s\-]+\.\s?[\w\d]+))',
]
pattern_list = [re.compile(p, re.I | re.UNICODE) for p in pattern_list]
# some possible endings for URLs:
p = ur'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*([\w\d\_\-]+\.%s))'
for extension in file_types_list:
p_url = re.compile(p % extension, re.I | re.UNICODE)
pattern_list.append(p_url)
# if url last thing in line, and only 10 letters max, concat them
p_url = re.compile(
r'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*\s*?([\w\d\_\.\-]\s?){1,10}\s*)$',
re.I | re.UNICODE)
pattern_list.append(p_url)
return pattern_list | Initialise and return a list of precompiled regexp patterns that
are used to try to re-assemble URLs that have been broken during
a document's conversion to plain-text.
@return: (list) of compiled re regexp patterns used for finding
various broken URLs. | Below is the the instruction that describes the task:
### Input:
Initialise and return a list of precompiled regexp patterns that
are used to try to re-assemble URLs that have been broken during
a document's conversion to plain-text.
@return: (list) of compiled re regexp patterns used for finding
various broken URLs.
### Response:
def get_url_repair_patterns():
    """Initialise and return a list of precompiled regexp patterns that
    are used to try to re-assemble URLs that have been broken during
    a document's conversion to plain-text.

    @return: (list) of compiled re regexp patterns used for finding
        various broken URLs.
    """
    # File extensions recognised at the end of a URL, written so that
    # whitespace may appear between the letters (an artifact of the
    # plain-text conversion).
    # BUG FIX: several entries were missing the separating comma, so
    # adjacent string literals were silently concatenated into single
    # broken patterns ('txt'+'php'+'asp'+'jsp' and 'gif'...'doc').
    # The Python-2-only ur'' literals are replaced with r''; the
    # patterns are pure ASCII, so the match semantics are unchanged.
    file_types_list = [
        r'h\s*t\s*m',           # htm
        r'h\s*t\s*m\s*l',       # html
        r't\s*x\s*t',           # txt
        r'p\s*h\s*p',           # php
        r'a\s*s\s*p\s*',        # asp
        r'j\s*s\s*p',           # jsp
        r'p\s*y',               # py (python)
        r'p\s*l',               # pl (perl)
        r'x\s*m\s*l',           # xml
        r'j\s*p\s*g',           # jpg
        r'g\s*i\s*f',           # gif
        r'm\s*o\s*v',           # mov
        r's\s*w\s*f',           # swf
        r'p\s*d\s*f',           # pdf
        r'p\s*s',               # ps
        r'd\s*o\s*c',           # doc
        r't\s*e\s*x',           # tex
        r's\s*h\s*t\s*m\s*l',   # shtml
    ]
    # Generic patterns for (partial) URLs broken by inserted whitespace.
    pattern_list = [
        r'(h\s*t\s*t\s*p\s*\:\s*\/\s*\/)',
        r'(f\s*t\s*p\s*\:\s*\/\s*\/\s*)',
        r'((http|ftp):\/\/\s*[\w\d])',
        r'((http|ftp):\/\/([\w\d\s\._\-])+?\s*\/)',
        r'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)+)',
        r'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\s\.\-])+?\/)*([\w\d\_\s\-]+\.\s?[\w\d]+))',
    ]
    pattern_list = [re.compile(p, re.I | re.UNICODE) for p in pattern_list]
    # Possible endings for URLs: one pattern per known file extension.
    p = r'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*([\w\d\_\-]+\.%s))'
    for extension in file_types_list:
        p_url = re.compile(p % extension, re.I | re.UNICODE)
        pattern_list.append(p_url)
    # If a URL is the last thing on the line and at most 10 letters
    # follow it, treat those letters as part of the URL.
    p_url = re.compile(
        r'((http|ftp):\/\/([\w\d\_\.\-])+\/(([\w\d\_\.\-])+?\/)*\s*?([\w\d\_\.\-]\s?){1,10}\s*)$',
        re.I | re.UNICODE)
    pattern_list.append(p_url)
    return pattern_list
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old | Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. | Below is the the instruction that describes the task:
### Input:
Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter.
### Response:
def load_app(target):
    """ Import *target* as a bottle application without disturbing the
    currently active default application, and return the imported app
    as a separate application object. See :func:`load` for the target
    parameter. """
    global NORUN
    nr_old, NORUN = NORUN, True
    sandbox = default_app.push()  # temporary "default application"
    try:
        loaded = load(target)  # Import the target module
        return loaded if callable(loaded) else sandbox
    finally:
        # Drop the temporary default application and restore NORUN.
        default_app.remove(sandbox)
        NORUN = nr_old
def get_relationship_admin_session_for_family(self, family_id):
"""Gets the ``OsidSession`` associated with the relationship administration service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family``
return: (osid.relationship.RelationshipAdminSession) - a
``RelationshipAdminSession``
raise: NotFound - no family found by the given ``Id``
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_relationship_admin()`` and
``supports_visible_federation()`` are ``true``*
"""
if not self.supports_relationship_admin():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.RelationshipAdminSession(family_id, runtime=self._runtime) | Gets the ``OsidSession`` associated with the relationship administration service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family``
return: (osid.relationship.RelationshipAdminSession) - a
``RelationshipAdminSession``
raise: NotFound - no family found by the given ``Id``
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_relationship_admin()`` and
``supports_visible_federation()`` are ``true``* | Below is the the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the relationship administration service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family``
return: (osid.relationship.RelationshipAdminSession) - a
``RelationshipAdminSession``
raise: NotFound - no family found by the given ``Id``
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_relationship_admin()`` and
``supports_visible_federation()`` are ``true``*
### Response:
def get_relationship_admin_session_for_family(self, family_id):
    """Return a ``RelationshipAdminSession`` scoped to the given family.

    arg:    family_id (osid.id.Id): the ``Id`` of the ``Family``
    return: (osid.relationship.RelationshipAdminSession) - a
            ``RelationshipAdminSession``
    raise:  NotFound - no family found by the given ``Id``
    raise:  NullArgument - ``family_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_relationship_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_relationship_admin()`` and
    ``supports_visible_federation()`` are ``true``*
    """
    if self.supports_relationship_admin():
        # NOTE: should also verify that the catalog Id exists and raise
        # errors.NotFound otherwise (see the raise contract above).
        # pylint: disable=no-member
        return sessions.RelationshipAdminSession(family_id, runtime=self._runtime)
    raise errors.Unimplemented()
def screen(self):
'''
Turn on/off screen.
Usage:
d.screen.on()
d.screen.off()
d.screen == 'on' # Check if the screen is on, same as 'd.screenOn'
d.screen == 'off' # Check if the screen is off, same as 'not d.screenOn'
'''
devive_self = self
class _Screen(object):
def on(self):
return devive_self.wakeup()
def off(self):
return devive_self.sleep()
def __call__(self, action):
if action == "on":
return self.on()
elif action == "off":
return self.off()
else:
raise AttributeError("Invalid parameter: %s" % action)
def __eq__(self, value):
info = devive_self.info
if "screenOn" not in info:
raise EnvironmentError("Not supported on Android 4.3 and belows.")
if value in ["on", "On", "ON"]:
return info["screenOn"]
elif value in ["off", "Off", "OFF"]:
return not info["screenOn"]
raise ValueError("Invalid parameter. It can only be compared with on/off.")
def __ne__(self, value):
return not self.__eq__(value)
return _Screen() | Turn on/off screen.
Usage:
d.screen.on()
d.screen.off()
d.screen == 'on' # Check if the screen is on, same as 'd.screenOn'
d.screen == 'off' # Check if the screen is off, same as 'not d.screenOn' | Below is the the instruction that describes the task:
### Input:
Turn on/off screen.
Usage:
d.screen.on()
d.screen.off()
d.screen == 'on' # Check if the screen is on, same as 'd.screenOn'
d.screen == 'off' # Check if the screen is off, same as 'not d.screenOn'
### Response:
def screen(self):
    '''
    Turn the screen on or off.

    Usage:
        d.screen.on()
        d.screen.off()
        d.screen == 'on'   # True when the screen is on ('d.screenOn')
        d.screen == 'off'  # True when it is off ('not d.screenOn')
    '''
    device = self

    class _Screen(object):
        def on(self):
            return device.wakeup()

        def off(self):
            return device.sleep()

        def __call__(self, action):
            if action == "on":
                return self.on()
            if action == "off":
                return self.off()
            raise AttributeError("Invalid parameter: %s" % action)

        def __eq__(self, value):
            info = device.info
            if "screenOn" not in info:
                raise EnvironmentError("Not supported on Android 4.3 and belows.")
            if value in ("on", "On", "ON"):
                return info["screenOn"]
            if value in ("off", "Off", "OFF"):
                return not info["screenOn"]
            raise ValueError("Invalid parameter. It can only be compared with on/off.")

        def __ne__(self, value):
            return not self.__eq__(value)

    return _Screen()
def _add_halfwave(data, events, s_freq, opts):
"""Find the next zero crossing and the intervening peak and add
them to events. If no zero found before max_dur, event is discarded. If
peak-to-peak is smaller than min_ptp, the event is discarded.
Parameters
----------
data : ndarray (dtype='float')
vector with the data
events : ndarray (dtype='int')
N x 3 matrix with start, trough, end samples
s_freq : float
sampling frequency
opts : instance of 'DetectSlowWave'
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
Returns
-------
ndarray (dtype='int')
N x 5 matrix with start, trough, - to + zero crossing, peak,
and end samples
"""
max_dur = opts.duration[1]
if max_dur is None:
max_dur = MAXIMUM_DURATION
window = int(s_freq * max_dur)
peak_and_end = zeros((events.shape[0], 2), dtype='int')
events = concatenate((events, peak_and_end), axis=1)
selected = []
for ev in events:
zero_crossings = where(diff(sign(data[ev[2]:ev[0] + window])))[0]
if zero_crossings.any():
ev[4] = ev[2] + zero_crossings[0] + 1
#lg.info('0cross is at ' + str(ev[4]))
else:
selected.append(False)
#lg.info('no 0cross, rejected')
continue
ev[3] = ev[2] + argmin(data[ev[2]:ev[4]])
if abs(data[ev[1]] - data[ev[3]]) < opts.min_ptp:
selected.append(False)
#lg.info('ptp too low, rejected: ' + str(abs(data[ev[1]] - data[ev[3]])))
continue
selected.append(True)
#lg.info('SW checks out, accepted! ptp is ' + str(abs(data[ev[1]] - data[ev[3]])))
return events[selected, :] | Find the next zero crossing and the intervening peak and add
them to events. If no zero found before max_dur, event is discarded. If
peak-to-peak is smaller than min_ptp, the event is discarded.
Parameters
----------
data : ndarray (dtype='float')
vector with the data
events : ndarray (dtype='int')
N x 3 matrix with start, trough, end samples
s_freq : float
sampling frequency
opts : instance of 'DetectSlowWave'
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
Returns
-------
ndarray (dtype='int')
N x 5 matrix with start, trough, - to + zero crossing, peak,
        and end samples | Below is the instruction that describes the task:
### Input:
Find the next zero crossing and the intervening peak and add
them to events. If no zero found before max_dur, event is discarded. If
peak-to-peak is smaller than min_ptp, the event is discarded.
Parameters
----------
data : ndarray (dtype='float')
vector with the data
events : ndarray (dtype='int')
N x 3 matrix with start, trough, end samples
s_freq : float
sampling frequency
opts : instance of 'DetectSlowWave'
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
Returns
-------
ndarray (dtype='int')
N x 5 matrix with start, trough, - to + zero crossing, peak,
and end samples
### Response:
def _add_halfwave(data, events, s_freq, opts):
"""Find the next zero crossing and the intervening peak and add
them to events. If no zero found before max_dur, event is discarded. If
peak-to-peak is smaller than min_ptp, the event is discarded.
Parameters
----------
data : ndarray (dtype='float')
vector with the data
events : ndarray (dtype='int')
N x 3 matrix with start, trough, end samples
s_freq : float
sampling frequency
opts : instance of 'DetectSlowWave'
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
Returns
-------
ndarray (dtype='int')
N x 5 matrix with start, trough, - to + zero crossing, peak,
and end samples
"""
max_dur = opts.duration[1]
if max_dur is None:
max_dur = MAXIMUM_DURATION
window = int(s_freq * max_dur)
peak_and_end = zeros((events.shape[0], 2), dtype='int')
events = concatenate((events, peak_and_end), axis=1)
selected = []
for ev in events:
zero_crossings = where(diff(sign(data[ev[2]:ev[0] + window])))[0]
if zero_crossings.any():
ev[4] = ev[2] + zero_crossings[0] + 1
#lg.info('0cross is at ' + str(ev[4]))
else:
selected.append(False)
#lg.info('no 0cross, rejected')
continue
ev[3] = ev[2] + argmin(data[ev[2]:ev[4]])
if abs(data[ev[1]] - data[ev[3]]) < opts.min_ptp:
selected.append(False)
#lg.info('ptp too low, rejected: ' + str(abs(data[ev[1]] - data[ev[3]])))
continue
selected.append(True)
#lg.info('SW checks out, accepted! ptp is ' + str(abs(data[ev[1]] - data[ev[3]])))
return events[selected, :] |
def split_string(self, string, splitter='.', allow_empty=True):
"""Split the string with respect of quotes"""
i = 0
rv = []
need_split = False
while i < len(string):
m = re.compile(_KEY_NAME).match(string, i)
if not need_split and m:
i = m.end()
body = m.group(1)
if body[:3] == '"""':
body = self.converter.unescape(body[3:-3])
elif body[:3] == "'''":
body = body[3:-3]
elif body[0] == '"':
body = self.converter.unescape(body[1:-1])
elif body[0] == "'":
body = body[1:-1]
if not allow_empty and not body:
raise TomlDecodeError(
self.lineno,
'Empty section name is not allowed: %r' % string)
rv.append(body)
need_split = True
elif need_split and string[i] == splitter:
need_split = False
i += 1
continue
else:
raise TomlDecodeError(self.lineno,
'Illegal section name: %r' % string)
if not need_split:
raise TomlDecodeError(
self.lineno,
'Empty section name is not allowed: %r' % string)
return rv | Split the string with respect of quotes | Below is the the instruction that describes the task:
### Input:
Split the string with respect of quotes
### Response:
def split_string(self, string, splitter='.', allow_empty=True):
        """Split ``string`` on ``splitter`` with respect of TOML quotes.

        Quoted parts (basic, literal, and their multi-line forms) may
        contain the splitter without being split. Raises TomlDecodeError
        for malformed names; when ``allow_empty`` is False, also for
        empty parts.
        """
        # i: scan position in string; rv: the parts collected so far.
        i = 0
        rv = []
        # need_split is True right after a part was consumed, i.e. the
        # next expected character is the splitter (or end of string).
        need_split = False
        while i < len(string):
            # _KEY_NAME (defined elsewhere) matches the next key token;
            # presumably a bare or quoted key -- the branches below
            # unwrap the quoted forms.
            m = re.compile(_KEY_NAME).match(string, i)
            if not need_split and m:
                i = m.end()
                body = m.group(1)
                if body[:3] == '"""':
                    # Multi-line basic string: strip quotes, unescape.
                    body = self.converter.unescape(body[3:-3])
                elif body[:3] == "'''":
                    # Multi-line literal string: strip quotes only.
                    body = body[3:-3]
                elif body[0] == '"':
                    # Basic string: strip quotes, unescape.
                    body = self.converter.unescape(body[1:-1])
                elif body[0] == "'":
                    # Literal string: strip quotes only.
                    body = body[1:-1]
                if not allow_empty and not body:
                    raise TomlDecodeError(
                        self.lineno,
                        'Empty section name is not allowed: %r' % string)
                rv.append(body)
                need_split = True
            elif need_split and string[i] == splitter:
                # Consume the splitter and look for the next part.
                need_split = False
                i += 1
                continue
            else:
                # Two parts without a splitter between them, or a
                # splitter with no preceding part: malformed name.
                raise TomlDecodeError(self.lineno,
                                      'Illegal section name: %r' % string)
        if not need_split:
            # The string ended right after a splitter (or was empty),
            # so the final part is missing.
            raise TomlDecodeError(
                self.lineno,
                'Empty section name is not allowed: %r' % string)
        return rv
def FindUniqueId(dic):
"""Return a string not used as a key in the dictionary dic"""
name = str(len(dic))
while name in dic:
# Use bigger numbers so it is obvious when an id is picked randomly.
name = str(random.randint(1000000, 999999999))
return name | Return a string not used as a key in the dictionary dic | Below is the the instruction that describes the task:
### Input:
Return a string not used as a key in the dictionary dic
### Response:
def FindUniqueId(dic):
    """Return a string that is not currently used as a key in *dic*."""
    candidate = str(len(dic))
    while candidate in dic:
        # Big random numbers make it obvious the id was picked randomly.
        candidate = str(random.randint(1000000, 999999999))
    return candidate
def get_cytoband_coord(chrom, pos):
"""Get the cytoband coordinate for a position
Args:
chrom(str): A chromosome
pos(int): The position
Returns:
cytoband
"""
chrom = chrom.strip('chr')
pos = int(pos)
result = None
logger.debug("Finding Cytoband for chrom:{0} pos:{1}".format(chrom, pos))
if chrom in CYTOBANDS:
for interval in CYTOBANDS[chrom][pos]:
result = "{0}{1}".format(chrom, interval.data)
return result | Get the cytoband coordinate for a position
Args:
chrom(str): A chromosome
pos(int): The position
Returns:
cytoband | Below is the instruction that describes the task:
### Input:
Get the cytoband coordinate for a position
Args:
chrom(str): A chromosome
pos(int): The position
Returns:
cytoband
### Response:
def get_cytoband_coord(chrom, pos):
    """Get the cytoband coordinate for a position

    Args:
        chrom(str): A chromosome, with or without a leading 'chr' prefix
        pos(int): The position

    Returns:
        str or None: The cytoband, or None when the chromosome is not in
        CYTOBANDS or no interval covers the position
    """
    # BUGFIX: str.strip('chr') removes any of the characters 'c', 'h', 'r'
    # from *both* ends of the string (e.g. 'chr13_random_h' would be
    # mangled); remove only the literal 'chr' prefix instead.
    if chrom.startswith('chr'):
        chrom = chrom[3:]
    pos = int(pos)
    result = None
    logger.debug("Finding Cytoband for chrom:{0} pos:{1}".format(chrom, pos))
    if chrom in CYTOBANDS:
        # Keep the last matching interval, as the original lookup did.
        for interval in CYTOBANDS[chrom][pos]:
            result = "{0}{1}".format(chrom, interval.data)
    return result
def split_token_in_parts(token):
"""
Take a Token, and turn it in a list of tokens, by splitting
it on ':' (taking that as a separator.)
"""
result = []
current = []
for part in token + (':', ):
if part == ':':
if current:
result.append(tuple(current))
current = []
else:
current.append(part)
return result | Take a Token, and turn it in a list of tokens, by splitting
it on ':' (taking that as a separator.) | Below is the instruction that describes the task:
### Input:
Take a Token, and turn it in a list of tokens, by splitting
it on ':' (taking that as a separator.)
### Response:
def split_token_in_parts(token):
    """Break a Token into a list of sub-tokens, using ':' as the separator."""
    parts = []
    chunk = []
    # A trailing ':' sentinel guarantees the final chunk gets flushed.
    for item in token + (':',):
        if item != ':':
            chunk.append(item)
        elif chunk:
            # Separator reached with content pending: emit the chunk.
            parts.append(tuple(chunk))
            chunk = []
    return parts
def _add_formatted_imports(self) -> None:
"""Adds the imports back to the file.
(at the index of the first import) sorted alphabetically and split between groups
"""
sort_ignore_case = self.config['force_alphabetical_sort_within_sections']
sections = itertools.chain(self.sections, self.config['forced_separate']) # type: Iterable[str]
if self.config['no_sections']:
self.imports['no_sections'] = {'straight': [], 'from': {}}
for section in sections:
self.imports['no_sections']['straight'].extend(self.imports[section].get('straight', []))
self.imports['no_sections']['from'].update(self.imports[section].get('from', {}))
sections = ('no_sections', )
output = [] # type: List[str]
pending_lines_before = False
for section in sections:
straight_modules = self.imports[section]['straight']
straight_modules = nsorted(straight_modules, key=lambda key: self._module_key(key, self.config, section_name=section))
from_modules = self.imports[section]['from']
from_modules = nsorted(from_modules, key=lambda key: self._module_key(key, self.config, section_name=section))
section_output = [] # type: List[str]
if self.config['from_first']:
self._add_from_imports(from_modules, section, section_output, sort_ignore_case)
if self.config['lines_between_types'] and from_modules and straight_modules:
section_output.extend([''] * self.config['lines_between_types'])
self._add_straight_imports(straight_modules, section, section_output)
else:
self._add_straight_imports(straight_modules, section, section_output)
if self.config['lines_between_types'] and from_modules and straight_modules:
section_output.extend([''] * self.config['lines_between_types'])
self._add_from_imports(from_modules, section, section_output, sort_ignore_case)
if self.config['force_sort_within_sections']:
def by_module(line: str) -> str:
section = 'B'
if line.startswith('#'):
return 'AA'
line = re.sub('^from ', '', line)
line = re.sub('^import ', '', line)
if line.split(' ')[0] in self.config['force_to_top']:
section = 'A'
if not self.config['order_by_type']:
line = line.lower()
return '{0}{1}'.format(section, line)
section_output = nsorted(section_output, key=by_module)
section_name = section
no_lines_before = section_name in self.config['no_lines_before']
if section_output:
if section_name in self.place_imports:
self.place_imports[section_name] = section_output
continue
section_title = self.config.get('import_heading_' + str(section_name).lower(), '')
if section_title:
section_comment = "# {0}".format(section_title)
if section_comment not in self.out_lines[0:1] and section_comment not in self.in_lines[0:1]:
section_output.insert(0, section_comment)
if pending_lines_before or not no_lines_before:
output += ([''] * self.config['lines_between_sections'])
output += section_output
pending_lines_before = False
else:
pending_lines_before = pending_lines_before or not no_lines_before
while output and output[-1].strip() == '':
output.pop()
while output and output[0].strip() == '':
output.pop(0)
output_at = 0
if self.import_index < self.original_num_of_lines:
output_at = self.import_index
elif self._first_comment_index_end != -1 and self._first_comment_index_start <= 2:
output_at = self._first_comment_index_end
self.out_lines[output_at:0] = output
imports_tail = output_at + len(output)
while [character.strip() for character in self.out_lines[imports_tail: imports_tail + 1]] == [""]:
self.out_lines.pop(imports_tail)
if len(self.out_lines) > imports_tail:
next_construct = ""
self._in_quote = False # type: Any
tail = self.out_lines[imports_tail:]
for index, line in enumerate(tail):
in_quote = self._in_quote
if not self._skip_line(line) and line.strip():
if line.strip().startswith("#") and len(tail) > (index + 1) and tail[index + 1].strip():
continue
next_construct = line
break
elif not in_quote:
parts = line.split()
if len(parts) >= 3 and parts[1] == '=' and "'" not in parts[0] and '"' not in parts[0]:
next_construct = line
break
if self.config['lines_after_imports'] != -1:
self.out_lines[imports_tail:0] = ["" for line in range(self.config['lines_after_imports'])]
elif next_construct.startswith("def ") or next_construct.startswith("class ") or \
next_construct.startswith("@") or next_construct.startswith("async def"):
self.out_lines[imports_tail:0] = ["", ""]
else:
self.out_lines[imports_tail:0] = [""]
if self.place_imports:
new_out_lines = []
for index, line in enumerate(self.out_lines):
new_out_lines.append(line)
if line in self.import_placements:
new_out_lines.extend(self.place_imports[self.import_placements[line]])
if len(self.out_lines) <= index or self.out_lines[index + 1].strip() != "":
new_out_lines.append("")
self.out_lines = new_out_lines | Adds the imports back to the file.
(at the index of the first import) sorted alphabetically and split between groups | Below is the instruction that describes the task:
### Input:
Adds the imports back to the file.
(at the index of the first import) sorted alphabetically and split between groups
### Response:
def _add_formatted_imports(self) -> None:
    """Adds the imports back to the file.

    (at the index of the first import) sorted alphabetically and split between groups
    """
    sort_ignore_case = self.config['force_alphabetical_sort_within_sections']
    sections = itertools.chain(self.sections, self.config['forced_separate'])  # type: Iterable[str]

    # With no_sections enabled, collapse every section into one pseudo-section.
    if self.config['no_sections']:
        self.imports['no_sections'] = {'straight': [], 'from': {}}
        for section in sections:
            self.imports['no_sections']['straight'].extend(self.imports[section].get('straight', []))
            self.imports['no_sections']['from'].update(self.imports[section].get('from', {}))
        sections = ('no_sections', )

    output = []  # type: List[str]
    pending_lines_before = False
    for section in sections:
        # Sort both the straight ("import x") and from ("from x import y")
        # imports of this section with the configured module key.
        straight_modules = self.imports[section]['straight']
        straight_modules = nsorted(straight_modules, key=lambda key: self._module_key(key, self.config, section_name=section))
        from_modules = self.imports[section]['from']
        from_modules = nsorted(from_modules, key=lambda key: self._module_key(key, self.config, section_name=section))

        section_output = []  # type: List[str]
        # from_first flips the straight/from ordering within the section.
        if self.config['from_first']:
            self._add_from_imports(from_modules, section, section_output, sort_ignore_case)
            if self.config['lines_between_types'] and from_modules and straight_modules:
                section_output.extend([''] * self.config['lines_between_types'])
            self._add_straight_imports(straight_modules, section, section_output)
        else:
            self._add_straight_imports(straight_modules, section, section_output)
            if self.config['lines_between_types'] and from_modules and straight_modules:
                section_output.extend([''] * self.config['lines_between_types'])
            self._add_from_imports(from_modules, section, section_output, sort_ignore_case)

        if self.config['force_sort_within_sections']:
            def by_module(line: str) -> str:
                # Sort key: 'AA' pins comments first, 'A' prefixes
                # force_to_top modules, 'B' everything else.
                section = 'B'
                if line.startswith('#'):
                    return 'AA'
                line = re.sub('^from ', '', line)
                line = re.sub('^import ', '', line)
                if line.split(' ')[0] in self.config['force_to_top']:
                    section = 'A'
                if not self.config['order_by_type']:
                    line = line.lower()
                return '{0}{1}'.format(section, line)
            section_output = nsorted(section_output, key=by_module)

        section_name = section
        no_lines_before = section_name in self.config['no_lines_before']

        if section_output:
            # Sections routed through place_imports are emitted later,
            # next to their placement marker line, not here.
            if section_name in self.place_imports:
                self.place_imports[section_name] = section_output
                continue

            # Optional "# heading" comment above the section, unless the
            # file already starts with it.
            section_title = self.config.get('import_heading_' + str(section_name).lower(), '')
            if section_title:
                section_comment = "# {0}".format(section_title)
                if section_comment not in self.out_lines[0:1] and section_comment not in self.in_lines[0:1]:
                    section_output.insert(0, section_comment)

            if pending_lines_before or not no_lines_before:
                output += ([''] * self.config['lines_between_sections'])

            output += section_output
            pending_lines_before = False
        else:
            # Remember that a separator is owed before the next section.
            pending_lines_before = pending_lines_before or not no_lines_before

    # Trim blank lines from both ends of the assembled import block.
    while output and output[-1].strip() == '':
        output.pop()
    while output and output[0].strip() == '':
        output.pop(0)

    # Insert the block at the first original import, or after a leading
    # file comment when there were no imports.
    output_at = 0
    if self.import_index < self.original_num_of_lines:
        output_at = self.import_index
    elif self._first_comment_index_end != -1 and self._first_comment_index_start <= 2:
        output_at = self._first_comment_index_end
    self.out_lines[output_at:0] = output

    # Drop blank lines immediately following the inserted imports.
    imports_tail = output_at + len(output)
    while [character.strip() for character in self.out_lines[imports_tail: imports_tail + 1]] == [""]:
        self.out_lines.pop(imports_tail)

    if len(self.out_lines) > imports_tail:
        # Find the next real construct to decide how many blank lines to
        # re-insert between the imports and the rest of the file.
        next_construct = ""
        self._in_quote = False  # type: Any
        tail = self.out_lines[imports_tail:]

        for index, line in enumerate(tail):
            in_quote = self._in_quote
            if not self._skip_line(line) and line.strip():
                # A comment directly above code belongs to that code.
                if line.strip().startswith("#") and len(tail) > (index + 1) and tail[index + 1].strip():
                    continue
                next_construct = line
                break
            elif not in_quote:
                # A bare "name = value" assignment also counts as code.
                parts = line.split()
                if len(parts) >= 3 and parts[1] == '=' and "'" not in parts[0] and '"' not in parts[0]:
                    next_construct = line
                    break

        if self.config['lines_after_imports'] != -1:
            self.out_lines[imports_tail:0] = ["" for line in range(self.config['lines_after_imports'])]
        elif next_construct.startswith("def ") or next_construct.startswith("class ") or \
                next_construct.startswith("@") or next_construct.startswith("async def"):
            # PEP 8: two blank lines before top-level definitions.
            self.out_lines[imports_tail:0] = ["", ""]
        else:
            self.out_lines[imports_tail:0] = [""]

    if self.place_imports:
        # Splice the deferred sections in right after their marker lines.
        new_out_lines = []
        for index, line in enumerate(self.out_lines):
            new_out_lines.append(line)
            if line in self.import_placements:
                new_out_lines.extend(self.place_imports[self.import_placements[line]])
                if len(self.out_lines) <= index or self.out_lines[index + 1].strip() != "":
                    new_out_lines.append("")
        self.out_lines = new_out_lines
def DictReader(ltsvfile, labels=None, dict_type=dict):
"""Make LTSV Reader for reading selected labels.
:param ltsvfile: iterable of lines.
:param labels: sequence of labels.
:return: generator of record in {label: value, ...} form.
"""
for rec in reader(ltsvfile, labels):
yield dict_type(rec) | Make LTSV Reader for reading selected labels.
:param ltsvfile: iterable of lines.
:param labels: sequence of labels.
:return: generator of record in {label: value, ...} form. | Below is the instruction that describes the task:
### Input:
Make LTSV Reader for reading selected labels.
:param ltsvfile: iterable of lines.
:param labels: sequence of labels.
:return: generator of record in {label: value, ...} form.
### Response:
def DictReader(ltsvfile, labels=None, dict_type=dict):
    """Make LTSV Reader for reading selected labels.

    :param ltsvfile: iterable of lines.
    :param labels: sequence of labels.
    :return: generator of record in {label: value, ...} form.
    """
    # Delegate iteration to reader() and wrap each record lazily.
    yield from map(dict_type, reader(ltsvfile, labels))
def ppaged(self, msg: str, end: str = '\n', chop: bool = False) -> None:
"""Print output using a pager if it would go off screen and stdout isn't currently being redirected.
Never uses a pager inside of a script (Python or text) or when output is being redirected or piped or when
stdout or stdin are not a fully functional terminal.
:param msg: message to print to current stdout (anything convertible to a str with '{}'.format() is OK)
:param end: string appended after the end of the message if not already present, default a newline
:param chop: True -> causes lines longer than the screen width to be chopped (truncated) rather than wrapped
- truncated text is still accessible by scrolling with the right & left arrow keys
- chopping is ideal for displaying wide tabular data as is done in utilities like pgcli
False -> causes lines longer than the screen width to wrap to the next line
- wrapping is ideal when you want to keep users from having to use horizontal scrolling
WARNING: On Windows, the text always wraps regardless of what the chop argument is set to
"""
import subprocess
if msg is not None and msg != '':
try:
msg_str = '{}'.format(msg)
if not msg_str.endswith(end):
msg_str += end
# Attempt to detect if we are not running within a fully functional terminal.
# Don't try to use the pager when being run by a continuous integration system like Jenkins + pexpect.
functional_terminal = False
if self.stdin.isatty() and self.stdout.isatty():
if sys.platform.startswith('win') or os.environ.get('TERM') is not None:
functional_terminal = True
# Don't attempt to use a pager that can block if redirecting or running a script (either text or Python)
# Also only attempt to use a pager if actually running in a real fully functional terminal
if functional_terminal and not self.redirecting and not self._in_py and not self._script_dir:
if self.colors.lower() == constants.COLORS_NEVER.lower():
msg_str = utils.strip_ansi(msg_str)
pager = self.pager
if chop:
pager = self.pager_chop
# Prevent KeyboardInterrupts while in the pager. The pager application will
# still receive the SIGINT since it is in the same process group as us.
with self.sigint_protection:
pipe_proc = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE)
pipe_proc.communicate(msg_str.encode('utf-8', 'replace'))
else:
self.decolorized_write(self.stdout, msg_str)
except BrokenPipeError:
# This occurs if a command's output is being piped to another process and that process closes before the
# command is finished. If you would like your application to print a warning message, then set the
# broken_pipe_warning attribute to the message you want printed.`
if self.broken_pipe_warning:
sys.stderr.write(self.broken_pipe_warning) | Print output using a pager if it would go off screen and stdout isn't currently being redirected.
Never uses a pager inside of a script (Python or text) or when output is being redirected or piped or when
stdout or stdin are not a fully functional terminal.
:param msg: message to print to current stdout (anything convertible to a str with '{}'.format() is OK)
:param end: string appended after the end of the message if not already present, default a newline
:param chop: True -> causes lines longer than the screen width to be chopped (truncated) rather than wrapped
- truncated text is still accessible by scrolling with the right & left arrow keys
- chopping is ideal for displaying wide tabular data as is done in utilities like pgcli
False -> causes lines longer than the screen width to wrap to the next line
- wrapping is ideal when you want to keep users from having to use horizontal scrolling
WARNING: On Windows, the text always wraps regardless of what the chop argument is set to | Below is the instruction that describes the task:
### Input:
Print output using a pager if it would go off screen and stdout isn't currently being redirected.
Never uses a pager inside of a script (Python or text) or when output is being redirected or piped or when
stdout or stdin are not a fully functional terminal.
:param msg: message to print to current stdout (anything convertible to a str with '{}'.format() is OK)
:param end: string appended after the end of the message if not already present, default a newline
:param chop: True -> causes lines longer than the screen width to be chopped (truncated) rather than wrapped
- truncated text is still accessible by scrolling with the right & left arrow keys
- chopping is ideal for displaying wide tabular data as is done in utilities like pgcli
False -> causes lines longer than the screen width to wrap to the next line
- wrapping is ideal when you want to keep users from having to use horizontal scrolling
WARNING: On Windows, the text always wraps regardless of what the chop argument is set to
### Response:
def ppaged(self, msg: str, end: str = '\n', chop: bool = False) -> None:
    """Print output using a pager if it would go off screen and stdout isn't currently being redirected.

    Never uses a pager inside of a script (Python or text) or when output is being redirected or piped or when
    stdout or stdin are not a fully functional terminal.

    :param msg: message to print to current stdout (anything convertible to a str with '{}'.format() is OK)
    :param end: string appended after the end of the message if not already present, default a newline
    :param chop: True -> causes lines longer than the screen width to be chopped (truncated) rather than wrapped
                         - truncated text is still accessible by scrolling with the right & left arrow keys
                         - chopping is ideal for displaying wide tabular data as is done in utilities like pgcli
                 False -> causes lines longer than the screen width to wrap to the next line
                          - wrapping is ideal when you want to keep users from having to use horizontal scrolling

    WARNING: On Windows, the text always wraps regardless of what the chop argument is set to
    """
    # Local import: subprocess is only needed when something is printed.
    import subprocess
    if msg is not None and msg != '':
        try:
            msg_str = '{}'.format(msg)
            if not msg_str.endswith(end):
                msg_str += end

            # Attempt to detect if we are not running within a fully functional terminal.
            # Don't try to use the pager when being run by a continuous integration system like Jenkins + pexpect.
            functional_terminal = False
            if self.stdin.isatty() and self.stdout.isatty():
                if sys.platform.startswith('win') or os.environ.get('TERM') is not None:
                    functional_terminal = True

            # Don't attempt to use a pager that can block if redirecting or running a script (either text or Python)
            # Also only attempt to use a pager if actually running in a real fully functional terminal
            if functional_terminal and not self.redirecting and not self._in_py and not self._script_dir:
                if self.colors.lower() == constants.COLORS_NEVER.lower():
                    msg_str = utils.strip_ansi(msg_str)

                pager = self.pager
                if chop:
                    # pager_chop is the truncating variant of the pager command.
                    pager = self.pager_chop

                # Prevent KeyboardInterrupts while in the pager. The pager application will
                # still receive the SIGINT since it is in the same process group as us.
                with self.sigint_protection:
                    pipe_proc = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE)
                    pipe_proc.communicate(msg_str.encode('utf-8', 'replace'))
            else:
                self.decolorized_write(self.stdout, msg_str)
        except BrokenPipeError:
            # This occurs if a command's output is being piped to another process and that process closes before the
            # command is finished. If you would like your application to print a warning message, then set the
            # broken_pipe_warning attribute to the message you want printed.
            if self.broken_pipe_warning:
                sys.stderr.write(self.broken_pipe_warning)
sys.stderr.write(self.broken_pipe_warning) |
def parse_header_format(description):
"""Get the format from a vcf header line description
If format begins with white space it will be stripped
Args:
description(str): Description from a vcf header line
Return:
format(str): The format information from description
"""
description = description.strip('"')
keyword = 'Format:'
before_keyword, keyword, after_keyword = description.partition(keyword)
return after_keyword.strip() | Get the format from a vcf header line description
If format begins with white space it will be stripped
Args:
description(str): Description from a vcf header line
Return:
format(str): The format information from description | Below is the instruction that describes the task:
### Input:
Get the format from a vcf header line description
If format begins with white space it will be stripped
Args:
description(str): Description from a vcf header line
Return:
format(str): The format information from description
### Response:
def parse_header_format(description):
    """Extract the text following the 'Format:' keyword of a VCF header
    line description.

    Surrounding double quotes are removed first, and any whitespace
    around the extracted format string is stripped.

    Args:
        description(str): Description from a vcf header line

    Return:
        format(str): The format information (empty string when the
        'Format:' keyword is absent)
    """
    cleaned = description.strip('"')
    _, _, format_part = cleaned.partition('Format:')
    return format_part.strip()
async def search(self, term: str, limit: int = 3) -> 'List[Word]':
"""Performs a search for a term and returns a list of possible matching :class:`Word`
objects.
Args:
term: The term to be defined.
limit (optional): Max amount of results returned.
Defaults to 3.
Note:
The API will relay a fixed number of words and definitions, so ``limit`` can be
arbitrarily high if needed or wanted.
Returns:
A list of :class:`Word` objects of up to the specified length.
Raises:
UrbanConnectionError: If the response status isn't ``200``.
WordNotFoundError: If the response doesn't contain data (i.e. no word found).
"""
resp = await self._get(term=term)
words = resp['list']
return [Word(x) for x in words[:limit]] | Performs a search for a term and returns a list of possible matching :class:`Word`
objects.
Args:
term: The term to be defined.
limit (optional): Max amount of results returned.
Defaults to 3.
Note:
The API will relay a fixed number of words and definitions, so ``limit`` can be
arbitrarily high if needed or wanted.
Returns:
A list of :class:`Word` objects of up to the specified length.
Raises:
UrbanConnectionError: If the response status isn't ``200``.
WordNotFoundError: If the response doesn't contain data (i.e. no word found). | Below is the instruction that describes the task:
### Input:
Performs a search for a term and returns a list of possible matching :class:`Word`
objects.
Args:
term: The term to be defined.
limit (optional): Max amount of results returned.
Defaults to 3.
Note:
The API will relay a fixed number of words and definitions, so ``limit`` can be
arbitrarily high if needed or wanted.
Returns:
A list of :class:`Word` objects of up to the specified length.
Raises:
UrbanConnectionError: If the response status isn't ``200``.
WordNotFoundError: If the response doesn't contain data (i.e. no word found).
### Response:
async def search(self, term: str, limit: int = 3) -> 'List[Word]':
    """Performs a search for a term and returns a list of possible matching
    :class:`Word` objects.

    Args:
        term: The term to be defined.
        limit (optional): Max amount of results returned. Defaults to 3.

    Note:
        The API will relay a fixed number of words and definitions, so
        ``limit`` can be arbitrarily high if needed or wanted.

    Returns:
        A list of :class:`Word` objects of up to the specified length.

    Raises:
        UrbanConnectionError: If the response status isn't ``200``.
        WordNotFoundError: If the response doesn't contain data (i.e. no word found).
    """
    response = await self._get(term=term)
    # Truncate first, then wrap each raw entry in a Word.
    matches = response['list'][:limit]
    return [Word(entry) for entry in matches]
def http_query(self, method, path, data=None, dont_connect=False, **kwargs):
"""
:param dont_connect: If true do not reconnect if not connected
"""
if not self._connected and not dont_connect:
if self._id == "vm" and not self._controller.gns3vm.running:
yield from self._controller.gns3vm.start()
yield from self.connect()
if not self._connected and not dont_connect:
raise ComputeError("Cannot connect to compute '{}' with request {} {}".format(self._name, method, path))
response = yield from self._run_http_query(method, path, data=data, **kwargs)
return response | :param dont_connect: If true do not reconnect if not connected | Below is the instruction that describes the task:
### Input:
:param dont_connect: If true do not reconnect if not connected
### Response:
def http_query(self, method, path, data=None, dont_connect=False, **kwargs):
    """
    Send an HTTP request to this compute, reconnecting first if needed.

    :param method: HTTP verb for the request
    :param path: request path on the compute
    :param data: optional request payload
    :param dont_connect: If true do not reconnect if not connected
    :raises ComputeError: when the compute cannot be connected to
    :return: the response from the compute
    """
    if not self._connected and not dont_connect:
        # The GNS3 VM compute must be started before it can be reached.
        if self._id == "vm" and not self._controller.gns3vm.running:
            yield from self._controller.gns3vm.start()
        yield from self.connect()
    # Re-check: connect() may have returned without establishing a link.
    if not self._connected and not dont_connect:
        raise ComputeError("Cannot connect to compute '{}' with request {} {}".format(self._name, method, path))
    response = yield from self._run_http_query(method, path, data=data, **kwargs)
    return response
def load_json(json_str):
"""Loads symbol from json string.
Parameters
----------
json_str : str
A JSON string.
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.tojson : Used to save symbol into json string.
"""
if not isinstance(json_str, string_types):
raise TypeError('fname required to be string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
return Symbol(handle) | Loads symbol from json string.
Parameters
----------
json_str : str
A JSON string.
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.tojson : Used to save symbol into json string. | Below is the instruction that describes the task:
### Input:
Loads symbol from json string.
Parameters
----------
json_str : str
A JSON string.
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.tojson : Used to save symbol into json string.
### Response:
def load_json(json_str):
    """Loads symbol from json string.

    Parameters
    ----------
    json_str : str
        A JSON string.

    Returns
    -------
    sym : Symbol
        The loaded symbol.

    See Also
    --------
    Symbol.tojson : Used to save symbol into json string.
    """
    if not isinstance(json_str, string_types):
        # BUGFIX: the previous message said "fname", which is not a
        # parameter of this function; name the actual argument.
        raise TypeError('json_str required to be string')
    handle = SymbolHandle()
    check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
    return Symbol(handle)
def morphAveragePitch(fromDataList, toDataList):
'''
Adjusts the values in fromPitchList to have the same average as toPitchList
Because other manipulations can alter the average pitch, morphing the pitch
is the last pitch manipulation that should be done
After the morphing, the code removes any values below zero, thus the
final average might not match the target average.
'''
timeList, fromPitchList = zip(*fromDataList)
toPitchList = [pitchVal for _, pitchVal in toDataList]
# Zero pitch values aren't meaningful, so filter them out if they are
# in the dataset
fromListNoZeroes = [val for val in fromPitchList if val > 0]
fromAverage = sum(fromListNoZeroes) / float(len(fromListNoZeroes))
toListNoZeroes = [val for val in toPitchList if val > 0]
toAverage = sum(toListNoZeroes) / float(len(toListNoZeroes))
newPitchList = [val - fromAverage + toAverage for val in fromPitchList]
# finalAverage = sum(newPitchList) / float(len(newPitchList))
# Removing zeroes and negative pitch values
retDataList = [(time, pitchVal) for time, pitchVal
in zip(timeList, newPitchList)
if pitchVal > 0]
return retDataList | Adjusts the values in fromPitchList to have the same average as toPitchList
Because other manipulations can alter the average pitch, morphing the pitch
is the last pitch manipulation that should be done
After the morphing, the code removes any values below zero, thus the
final average might not match the target average. | Below is the instruction that describes the task:
### Input:
Adjusts the values in fromPitchList to have the same average as toPitchList
Because other manipulations can alter the average pitch, morphing the pitch
is the last pitch manipulation that should be done
After the morphing, the code removes any values below zero, thus the
final average might not match the target average.
### Response:
def morphAveragePitch(fromDataList, toDataList):
    '''
    Shifts the values in fromDataList so their average matches toDataList.

    Because other manipulations can alter the average pitch, morphing the
    pitch is the last pitch manipulation that should be done.  Zero pitch
    values are ignored when computing the averages, and shifted values at
    or below zero are dropped, so the final average might not match the
    target average exactly.
    '''
    timeList, fromPitchList = zip(*fromDataList)

    def _voicedMean(pitchValues):
        # Zero pitch values aren't meaningful (unvoiced frames); skip them.
        voiced = [value for value in pitchValues if value > 0]
        return sum(voiced) / float(len(voiced))

    fromAverage = _voicedMean(fromPitchList)
    toAverage = _voicedMean([pitchVal for _, pitchVal in toDataList])

    # Shift every sample, keeping only strictly positive results.
    return [(time, pitchVal - fromAverage + toAverage)
            for time, pitchVal in zip(timeList, fromPitchList)
            if pitchVal - fromAverage + toAverage > 0]
def cee_map_priority_table_map_cos5_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name_key = ET.SubElement(cee_map, "name")
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, "priority-table")
map_cos5_pgid = ET.SubElement(priority_table, "map-cos5-pgid")
map_cos5_pgid.text = kwargs.pop('map_cos5_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def cee_map_priority_table_map_cos5_pgid(self, **kwargs):
    """Build the config payload for map-cos5-pgid under cee-map/priority-table.

    Auto Generated Code
    """
    root = ET.Element("config")
    cee_map_elem = ET.SubElement(
        root, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
    # Key leaf identifying the cee-map instance.
    ET.SubElement(cee_map_elem, "name").text = kwargs.pop('name')
    priority_tbl = ET.SubElement(cee_map_elem, "priority-table")
    ET.SubElement(priority_tbl, "map-cos5-pgid").text = kwargs.pop('map_cos5_pgid')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def chrom_sorted(in_chroms):
"""
Sort a list of chromosomes in the order 1..22, X, Y, M, <others in alphabetical order>.
:param list in_chroms: Input chromosomes
:return: Sorted chromosomes
:rtype: list[str]
"""
in_chroms.sort()
canonicals = [str(c) for c in range(1, 23)] + ['X', 'Y', 'M', 'MT']
canonical_chr = ['chr' + c for c in canonicals]
out_chroms_dict = {
'can': [c for c in in_chroms if c in canonicals],
'can_chr': [c for c in in_chroms if c in canonical_chr],
'others': [c for c in in_chroms if c not in canonicals + canonical_chr]}
assert not (out_chroms_dict['can'] and out_chroms_dict['can_chr'])
assert not ('M' in out_chroms_dict['can']and 'MT' in out_chroms_dict['can'])
assert not ('chrM' in out_chroms_dict['can_chr'] and 'chrMT' in out_chroms_dict['can_chr'])
out_chroms_dict['can'] = canonical_chrom_sorted(out_chroms_dict['can'])
out_chroms_dict['can_chr'] = canonical_chrom_sorted(out_chroms_dict['can_chr'])
out_chroms = out_chroms_dict['can'] or out_chroms_dict['can_chr']
out_chroms.extend(out_chroms_dict['others'])
return out_chroms | Sort a list of chromosomes in the order 1..22, X, Y, M, <others in alphabetical order>.
:param list in_chroms: Input chromosomes
:return: Sorted chromosomes
:rtype: list[str] | Below is the instruction that describes the task:
### Input:
Sort a list of chromosomes in the order 1..22, X, Y, M, <others in alphabetical order>.
:param list in_chroms: Input chromosomes
:return: Sorted chromosomes
:rtype: list[str]
### Response:
def chrom_sorted(in_chroms):
"""
Sort a list of chromosomes in the order 1..22, X, Y, M, <others in alphabetical order>.
:param list in_chroms: Input chromosomes
:return: Sorted chromosomes
:rtype: list[str]
"""
in_chroms.sort()
canonicals = [str(c) for c in range(1, 23)] + ['X', 'Y', 'M', 'MT']
canonical_chr = ['chr' + c for c in canonicals]
out_chroms_dict = {
'can': [c for c in in_chroms if c in canonicals],
'can_chr': [c for c in in_chroms if c in canonical_chr],
'others': [c for c in in_chroms if c not in canonicals + canonical_chr]}
assert not (out_chroms_dict['can'] and out_chroms_dict['can_chr'])
assert not ('M' in out_chroms_dict['can']and 'MT' in out_chroms_dict['can'])
assert not ('chrM' in out_chroms_dict['can_chr'] and 'chrMT' in out_chroms_dict['can_chr'])
out_chroms_dict['can'] = canonical_chrom_sorted(out_chroms_dict['can'])
out_chroms_dict['can_chr'] = canonical_chrom_sorted(out_chroms_dict['can_chr'])
out_chroms = out_chroms_dict['can'] or out_chroms_dict['can_chr']
out_chroms.extend(out_chroms_dict['others'])
return out_chroms |
def compile_column(name: str, data_type: str, nullable: bool) -> str:
"""Create column definition statement."""
null_str = 'NULL' if nullable else 'NOT NULL'
return '{name} {data_type} {null},'.format(name=name,
data_type=data_type,
null=null_str) | Create column definition statement. | Below is the the instruction that describes the task:
### Input:
Create column definition statement.
### Response:
def compile_column(name: str, data_type: str, nullable: bool) -> str:
"""Create column definition statement."""
null_str = 'NULL' if nullable else 'NOT NULL'
return '{name} {data_type} {null},'.format(name=name,
data_type=data_type,
null=null_str) |
def tap_hold(self, x, y, duration=1.0):
"""
Tap and hold for a moment
Args:
- x, y(int): position
- duration(float): seconds of hold time
[[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],
"""
data = {'x': x, 'y': y, 'duration': duration}
return self.http.post('/wda/touchAndHold', data=data) | Tap and hold for a moment
Args:
- x, y(int): position
- duration(float): seconds of hold time
[[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)], | Below is the the instruction that describes the task:
### Input:
Tap and hold for a moment
Args:
- x, y(int): position
- duration(float): seconds of hold time
[[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],
### Response:
def tap_hold(self, x, y, duration=1.0):
"""
Tap and hold for a moment
Args:
- x, y(int): position
- duration(float): seconds of hold time
[[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],
"""
data = {'x': x, 'y': y, 'duration': duration}
return self.http.post('/wda/touchAndHold', data=data) |
def get_as_boolean(self, key):
"""
Converts map element into a boolean or returns false if conversion is not possible.
:param key: an index of element to get.
:return: boolean value ot the element or false if conversion is not supported.
"""
value = self.get(key)
return BooleanConverter.to_boolean(value) | Converts map element into a boolean or returns false if conversion is not possible.
:param key: an index of element to get.
:return: boolean value ot the element or false if conversion is not supported. | Below is the the instruction that describes the task:
### Input:
Converts map element into a boolean or returns false if conversion is not possible.
:param key: an index of element to get.
:return: boolean value ot the element or false if conversion is not supported.
### Response:
def get_as_boolean(self, key):
"""
Converts map element into a boolean or returns false if conversion is not possible.
:param key: an index of element to get.
:return: boolean value ot the element or false if conversion is not supported.
"""
value = self.get(key)
return BooleanConverter.to_boolean(value) |
def _pickle_batch(self):
"""
Pickle the metrics into a form that can be understood
by the graphite pickle connector.
"""
# Pickle
payload = pickle.dumps(self.batch)
# Pack Message
header = struct.pack("!L", len(payload))
message = header + payload
# Return Message
return message | Pickle the metrics into a form that can be understood
by the graphite pickle connector. | Below is the the instruction that describes the task:
### Input:
Pickle the metrics into a form that can be understood
by the graphite pickle connector.
### Response:
def _pickle_batch(self):
"""
Pickle the metrics into a form that can be understood
by the graphite pickle connector.
"""
# Pickle
payload = pickle.dumps(self.batch)
# Pack Message
header = struct.pack("!L", len(payload))
message = header + payload
# Return Message
return message |
def parse(cls, specsFileOrString):
"""Parsers a file or string and returns a list of AudioClipSpec
Arguments:
specsFileOrString (str): specifications' file or string
Examples:
>>> SpecsParser.parse('23.4 34.1\n40.2 79.65 Hello World!')
[<AudioClipSpec start:23.40, end:34.10, text:''>,
<AudioClipSpec start:40.20, end:79.65, text:'Hello World!'>]
Returns: list(AudioClipSpec) or None
"""
stringToParse = None
# Read the contents of the file if specsFileOrString is not a string
if os.path.isfile(specsFileOrString):
with open(specsFileOrString, 'r') as f:
stringToParse = f.read()
else:
stringToParse = specsFileOrString
# Audacity uses \r for newlines
lines = [x.strip() for x in re.split(r'[\r\n]+', stringToParse)]
clips = []
for line in lines:
if line != '':
clips.append(cls._parseLine(line))
# if spec != None:
# clips.append(spec)
return clips | Parsers a file or string and returns a list of AudioClipSpec
Arguments:
specsFileOrString (str): specifications' file or string
Examples:
>>> SpecsParser.parse('23.4 34.1\n40.2 79.65 Hello World!')
[<AudioClipSpec start:23.40, end:34.10, text:''>,
<AudioClipSpec start:40.20, end:79.65, text:'Hello World!'>]
Returns: list(AudioClipSpec) or None | Below is the the instruction that describes the task:
### Input:
Parsers a file or string and returns a list of AudioClipSpec
Arguments:
specsFileOrString (str): specifications' file or string
Examples:
>>> SpecsParser.parse('23.4 34.1\n40.2 79.65 Hello World!')
[<AudioClipSpec start:23.40, end:34.10, text:''>,
<AudioClipSpec start:40.20, end:79.65, text:'Hello World!'>]
Returns: list(AudioClipSpec) or None
### Response:
def parse(cls, specsFileOrString):
"""Parsers a file or string and returns a list of AudioClipSpec
Arguments:
specsFileOrString (str): specifications' file or string
Examples:
>>> SpecsParser.parse('23.4 34.1\n40.2 79.65 Hello World!')
[<AudioClipSpec start:23.40, end:34.10, text:''>,
<AudioClipSpec start:40.20, end:79.65, text:'Hello World!'>]
Returns: list(AudioClipSpec) or None
"""
stringToParse = None
# Read the contents of the file if specsFileOrString is not a string
if os.path.isfile(specsFileOrString):
with open(specsFileOrString, 'r') as f:
stringToParse = f.read()
else:
stringToParse = specsFileOrString
# Audacity uses \r for newlines
lines = [x.strip() for x in re.split(r'[\r\n]+', stringToParse)]
clips = []
for line in lines:
if line != '':
clips.append(cls._parseLine(line))
# if spec != None:
# clips.append(spec)
return clips |
def calculate_s0_curve(s0, minpval, maxpval, minratio, maxratio, curve_interval=0.1):
"""
Calculate s0 curve for volcano plot.
Taking an min and max p value, and a min and max ratio, calculate an smooth
curve starting from parameter `s0` in each direction.
The `curve_interval` parameter defines the smoothness of the resulting curve.
:param s0: `float` offset of curve from interset
:param minpval: `float` minimum p value
:param maxpval: `float` maximum p value
:param minratio: `float` minimum ratio
:param maxratio: `float` maximum ratio
:param curve_interval: `float` stepsize (smoothness) of curve generator
:return: x, y, fn x,y points of curve, and fn generator
"""
mminpval = -np.log10(minpval)
mmaxpval = -np.log10(maxpval)
maxpval_adjust = mmaxpval - mminpval
ax0 = (s0 + maxpval_adjust * minratio) / maxpval_adjust
edge_offset = (maxratio-ax0) % curve_interval
max_x = maxratio-edge_offset
if (max_x > ax0):
x = np.arange(ax0, max_x, curve_interval)
else:
x = np.arange(max_x, ax0, curve_interval)
fn = lambda x: 10 ** (-s0/(x-minratio) - mminpval)
y = fn(x)
return x, y, fn | Calculate s0 curve for volcano plot.
Taking an min and max p value, and a min and max ratio, calculate an smooth
curve starting from parameter `s0` in each direction.
The `curve_interval` parameter defines the smoothness of the resulting curve.
:param s0: `float` offset of curve from interset
:param minpval: `float` minimum p value
:param maxpval: `float` maximum p value
:param minratio: `float` minimum ratio
:param maxratio: `float` maximum ratio
:param curve_interval: `float` stepsize (smoothness) of curve generator
:return: x, y, fn x,y points of curve, and fn generator | Below is the the instruction that describes the task:
### Input:
Calculate s0 curve for volcano plot.
Taking an min and max p value, and a min and max ratio, calculate an smooth
curve starting from parameter `s0` in each direction.
The `curve_interval` parameter defines the smoothness of the resulting curve.
:param s0: `float` offset of curve from interset
:param minpval: `float` minimum p value
:param maxpval: `float` maximum p value
:param minratio: `float` minimum ratio
:param maxratio: `float` maximum ratio
:param curve_interval: `float` stepsize (smoothness) of curve generator
:return: x, y, fn x,y points of curve, and fn generator
### Response:
def calculate_s0_curve(s0, minpval, maxpval, minratio, maxratio, curve_interval=0.1):
"""
Calculate s0 curve for volcano plot.
Taking an min and max p value, and a min and max ratio, calculate an smooth
curve starting from parameter `s0` in each direction.
The `curve_interval` parameter defines the smoothness of the resulting curve.
:param s0: `float` offset of curve from interset
:param minpval: `float` minimum p value
:param maxpval: `float` maximum p value
:param minratio: `float` minimum ratio
:param maxratio: `float` maximum ratio
:param curve_interval: `float` stepsize (smoothness) of curve generator
:return: x, y, fn x,y points of curve, and fn generator
"""
mminpval = -np.log10(minpval)
mmaxpval = -np.log10(maxpval)
maxpval_adjust = mmaxpval - mminpval
ax0 = (s0 + maxpval_adjust * minratio) / maxpval_adjust
edge_offset = (maxratio-ax0) % curve_interval
max_x = maxratio-edge_offset
if (max_x > ax0):
x = np.arange(ax0, max_x, curve_interval)
else:
x = np.arange(max_x, ax0, curve_interval)
fn = lambda x: 10 ** (-s0/(x-minratio) - mminpval)
y = fn(x)
return x, y, fn |
def build_tqdm_inner(self, desc, total):
"""
Extension point. Override to provide custom options to inner progress bars (Batch loop)
:param desc: Description
:param total: Number of batches
:return: new progress bar
"""
return self.tqdm(desc=desc, total=total, leave=self.leave_inner) | Extension point. Override to provide custom options to inner progress bars (Batch loop)
:param desc: Description
:param total: Number of batches
:return: new progress bar | Below is the the instruction that describes the task:
### Input:
Extension point. Override to provide custom options to inner progress bars (Batch loop)
:param desc: Description
:param total: Number of batches
:return: new progress bar
### Response:
def build_tqdm_inner(self, desc, total):
"""
Extension point. Override to provide custom options to inner progress bars (Batch loop)
:param desc: Description
:param total: Number of batches
:return: new progress bar
"""
return self.tqdm(desc=desc, total=total, leave=self.leave_inner) |
def install_os_updates(distribution, force=False):
""" installs OS updates """
if ('centos' in distribution or
'rhel' in distribution or
'redhat' in distribution):
bookshelf2.logging_helpers.log_green('installing OS updates')
sudo("yum -y --quiet clean all")
sudo("yum group mark convert")
sudo("yum -y --quiet update")
if ('ubuntu' in distribution or
'debian' in distribution):
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=False, capture=True):
sudo("DEBIAN_FRONTEND=noninteractive apt-get update")
if force:
sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o "
"Dpkg::Options::='--force-confdef' "
"-o Dpkg::Options::='--force-confold' upgrade --force-yes")
else:
sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o "
"Dpkg::Options::='--force-confdef' -o "
"Dpkg::Options::='--force-confold' upgrade") | installs OS updates | Below is the the instruction that describes the task:
### Input:
installs OS updates
### Response:
def install_os_updates(distribution, force=False):
""" installs OS updates """
if ('centos' in distribution or
'rhel' in distribution or
'redhat' in distribution):
bookshelf2.logging_helpers.log_green('installing OS updates')
sudo("yum -y --quiet clean all")
sudo("yum group mark convert")
sudo("yum -y --quiet update")
if ('ubuntu' in distribution or
'debian' in distribution):
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=False, capture=True):
sudo("DEBIAN_FRONTEND=noninteractive apt-get update")
if force:
sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o "
"Dpkg::Options::='--force-confdef' "
"-o Dpkg::Options::='--force-confold' upgrade --force-yes")
else:
sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o "
"Dpkg::Options::='--force-confdef' -o "
"Dpkg::Options::='--force-confold' upgrade") |
def send(socket, data, num_bytes=20):
"""Send data to specified socket.
:param socket: open socket instance
:param data: data to send
:param num_bytes: number of bytes to read
:return: received data
"""
pickled_data = pickle.dumps(data, -1)
length = str(len(pickled_data)).zfill(num_bytes)
socket.sendall(length.encode())
socket.sendall(pickled_data) | Send data to specified socket.
:param socket: open socket instance
:param data: data to send
:param num_bytes: number of bytes to read
:return: received data | Below is the the instruction that describes the task:
### Input:
Send data to specified socket.
:param socket: open socket instance
:param data: data to send
:param num_bytes: number of bytes to read
:return: received data
### Response:
def send(socket, data, num_bytes=20):
"""Send data to specified socket.
:param socket: open socket instance
:param data: data to send
:param num_bytes: number of bytes to read
:return: received data
"""
pickled_data = pickle.dumps(data, -1)
length = str(len(pickled_data)).zfill(num_bytes)
socket.sendall(length.encode())
socket.sendall(pickled_data) |
def _set_show_system_monitor(self, v, load=False):
"""
Setter method for show_system_monitor, mapped from YANG variable /brocade_system_monitor_ext_rpc/show_system_monitor (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_system_monitor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_system_monitor() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_system_monitor.show_system_monitor, is_leaf=True, yang_name="show-system-monitor", rest_name="show-system-monitor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'system-monitor-show'}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor-ext', defining_module='brocade-system-monitor-ext', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_system_monitor must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_system_monitor.show_system_monitor, is_leaf=True, yang_name="show-system-monitor", rest_name="show-system-monitor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'system-monitor-show'}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor-ext', defining_module='brocade-system-monitor-ext', yang_type='rpc', is_config=True)""",
})
self.__show_system_monitor = t
if hasattr(self, '_set'):
self._set() | Setter method for show_system_monitor, mapped from YANG variable /brocade_system_monitor_ext_rpc/show_system_monitor (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_system_monitor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_system_monitor() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for show_system_monitor, mapped from YANG variable /brocade_system_monitor_ext_rpc/show_system_monitor (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_system_monitor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_system_monitor() directly.
### Response:
def _set_show_system_monitor(self, v, load=False):
"""
Setter method for show_system_monitor, mapped from YANG variable /brocade_system_monitor_ext_rpc/show_system_monitor (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_system_monitor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_system_monitor() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_system_monitor.show_system_monitor, is_leaf=True, yang_name="show-system-monitor", rest_name="show-system-monitor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'system-monitor-show'}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor-ext', defining_module='brocade-system-monitor-ext', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_system_monitor must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_system_monitor.show_system_monitor, is_leaf=True, yang_name="show-system-monitor", rest_name="show-system-monitor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'system-monitor-show'}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor-ext', defining_module='brocade-system-monitor-ext', yang_type='rpc', is_config=True)""",
})
self.__show_system_monitor = t
if hasattr(self, '_set'):
self._set() |
def parse_posting_id(text, city):
"""
Parse the posting ID from the Backpage ad.
text -> The ad's HTML (or the a substring containing the "Post ID:" section)
city -> The Backpage city of the ad
"""
parts = text.split('Post ID: ')
if len(parts) == 2:
post_id = parts[1].split(' ')[0]
if post_id:
return post_id + post_id_bp_groups[city] | Parse the posting ID from the Backpage ad.
text -> The ad's HTML (or the a substring containing the "Post ID:" section)
city -> The Backpage city of the ad | Below is the the instruction that describes the task:
### Input:
Parse the posting ID from the Backpage ad.
text -> The ad's HTML (or the a substring containing the "Post ID:" section)
city -> The Backpage city of the ad
### Response:
def parse_posting_id(text, city):
"""
Parse the posting ID from the Backpage ad.
text -> The ad's HTML (or the a substring containing the "Post ID:" section)
city -> The Backpage city of the ad
"""
parts = text.split('Post ID: ')
if len(parts) == 2:
post_id = parts[1].split(' ')[0]
if post_id:
return post_id + post_id_bp_groups[city] |
def get_text(nodelist):
"""Get the value from a text node."""
value = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
value.append(node.data)
return ''.join(value) | Get the value from a text node. | Below is the the instruction that describes the task:
### Input:
Get the value from a text node.
### Response:
def get_text(nodelist):
"""Get the value from a text node."""
value = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
value.append(node.data)
return ''.join(value) |
def resolve(self, authorization: http.Header):
"""
Determine the user associated with a request, using HTTP Basic Authentication.
"""
if authorization is None:
return None
scheme, token = authorization.split()
if scheme.lower() != 'basic':
return None
username, password = base64.b64decode(token).decode('utf-8').split(':')
user = authenticate(username=username, password=password)
return user | Determine the user associated with a request, using HTTP Basic Authentication. | Below is the the instruction that describes the task:
### Input:
Determine the user associated with a request, using HTTP Basic Authentication.
### Response:
def resolve(self, authorization: http.Header):
"""
Determine the user associated with a request, using HTTP Basic Authentication.
"""
if authorization is None:
return None
scheme, token = authorization.split()
if scheme.lower() != 'basic':
return None
username, password = base64.b64decode(token).decode('utf-8').split(':')
user = authenticate(username=username, password=password)
return user |
def toProtocolElement(self):
"""
Converts this exception into the GA4GH protocol type so that
it can be communicated back to the client.
"""
error = protocol.GAException()
error.error_code = self.getErrorCode()
error.message = self.getMessage()
return error | Converts this exception into the GA4GH protocol type so that
it can be communicated back to the client. | Below is the the instruction that describes the task:
### Input:
Converts this exception into the GA4GH protocol type so that
it can be communicated back to the client.
### Response:
def toProtocolElement(self):
"""
Converts this exception into the GA4GH protocol type so that
it can be communicated back to the client.
"""
error = protocol.GAException()
error.error_code = self.getErrorCode()
error.message = self.getMessage()
return error |
def _set_tunnel(self, v, load=False):
"""
Setter method for tunnel, mapped from YANG variable /interface/tunnel (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tunnel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tunnel() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("identifier",tunnel.tunnel, yang_name="tunnel", rest_name="tunnel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}), is_container='list', yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tunnel must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("identifier",tunnel.tunnel, yang_name="tunnel", rest_name="tunnel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}), is_container='list', yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='list', is_config=True)""",
})
self.__tunnel = t
if hasattr(self, '_set'):
self._set() | Setter method for tunnel, mapped from YANG variable /interface/tunnel (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tunnel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tunnel() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for tunnel, mapped from YANG variable /interface/tunnel (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tunnel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tunnel() directly.
### Response:
def _set_tunnel(self, v, load=False):
"""
Setter method for tunnel, mapped from YANG variable /interface/tunnel (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tunnel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tunnel() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("identifier",tunnel.tunnel, yang_name="tunnel", rest_name="tunnel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}), is_container='list', yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tunnel must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("identifier",tunnel.tunnel, yang_name="tunnel", rest_name="tunnel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}), is_container='list', yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='list', is_config=True)""",
})
self.__tunnel = t
if hasattr(self, '_set'):
self._set() |
def _insert_base_path(self):
"""If the "base" path is set in the paths section of the config, insert
it into the python path.
"""
if config.BASE in self.paths:
sys.path.insert(0, self.paths[config.BASE]) | If the "base" path is set in the paths section of the config, insert
it into the python path. | Below is the the instruction that describes the task:
### Input:
If the "base" path is set in the paths section of the config, insert
it into the python path.
### Response:
def _insert_base_path(self):
"""If the "base" path is set in the paths section of the config, insert
it into the python path.
"""
if config.BASE in self.paths:
sys.path.insert(0, self.paths[config.BASE]) |
def matrix_undirected_unweighted(user):
"""
Returns an undirected, unweighted matrix where an edge exists if the
relationship is reciprocated.
"""
matrix = matrix_undirected_weighted(user, interaction=None)
for a, b in combinations(range(len(matrix)), 2):
if matrix[a][b] is None or matrix[b][a] is None:
continue
if matrix[a][b] > 0 and matrix[b][a] > 0:
matrix[a][b], matrix[b][a] = 1, 1
return matrix | Returns an undirected, unweighted matrix where an edge exists if the
relationship is reciprocated. | Below is the the instruction that describes the task:
### Input:
Returns an undirected, unweighted matrix where an edge exists if the
relationship is reciprocated.
### Response:
def matrix_undirected_unweighted(user):
"""
Returns an undirected, unweighted matrix where an edge exists if the
relationship is reciprocated.
"""
matrix = matrix_undirected_weighted(user, interaction=None)
for a, b in combinations(range(len(matrix)), 2):
if matrix[a][b] is None or matrix[b][a] is None:
continue
if matrix[a][b] > 0 and matrix[b][a] > 0:
matrix[a][b], matrix[b][a] = 1, 1
return matrix |
def _get_si():
'''
Authenticate with vCenter server and return service instance object.
'''
url = config.get_cloud_config_value(
'url', get_configured_provider(), __opts__, search_global=False
)
username = config.get_cloud_config_value(
'user', get_configured_provider(), __opts__, search_global=False
)
password = config.get_cloud_config_value(
'password', get_configured_provider(), __opts__, search_global=False
)
protocol = config.get_cloud_config_value(
'protocol', get_configured_provider(), __opts__, search_global=False, default='https'
)
port = config.get_cloud_config_value(
'port', get_configured_provider(), __opts__, search_global=False, default=443
)
return salt.utils.vmware.get_service_instance(url,
username,
password,
protocol=protocol,
port=port) | Authenticate with vCenter server and return service instance object. | Below is the the instruction that describes the task:
### Input:
Authenticate with vCenter server and return service instance object.
### Response:
def _get_si():
'''
Authenticate with vCenter server and return service instance object.
'''
url = config.get_cloud_config_value(
'url', get_configured_provider(), __opts__, search_global=False
)
username = config.get_cloud_config_value(
'user', get_configured_provider(), __opts__, search_global=False
)
password = config.get_cloud_config_value(
'password', get_configured_provider(), __opts__, search_global=False
)
protocol = config.get_cloud_config_value(
'protocol', get_configured_provider(), __opts__, search_global=False, default='https'
)
port = config.get_cloud_config_value(
'port', get_configured_provider(), __opts__, search_global=False, default=443
)
return salt.utils.vmware.get_service_instance(url,
username,
password,
protocol=protocol,
port=port) |
def acknowledge_message(self, delivery_tag, **kwargs):
"""Acknowledge the message delivery from RabbitMQ by sending a
Basic.Ack RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
"""
logger.info('Acknowledging message', delivery_tag=delivery_tag, **kwargs)
self._channel.basic_ack(delivery_tag) | Acknowledge the message delivery from RabbitMQ by sending a
Basic.Ack RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame | Below is the the instruction that describes the task:
### Input:
Acknowledge the message delivery from RabbitMQ by sending a
Basic.Ack RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
### Response:
def acknowledge_message(self, delivery_tag, **kwargs):
"""Acknowledge the message delivery from RabbitMQ by sending a
Basic.Ack RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
"""
logger.info('Acknowledging message', delivery_tag=delivery_tag, **kwargs)
self._channel.basic_ack(delivery_tag) |
def _get_hostname(self, hostname, metric):
        """
        If hostname is None, look at label_to_hostname setting
        """
        # Only fall back to a label when no hostname was supplied and a label
        # name has been configured to act as the hostname source.
        if hostname is None and self.label_to_hostname is not None:
            for label in metric.label:
                if label.name == self.label_to_hostname:
                    # First matching label wins; append the configured suffix.
                    return label.value + self.label_to_hostname_suffix
        return hostname | If hostname is None, look at label_to_hostname setting | Below is the the instruction that describes the task:
### Input:
If hostname is None, look at label_to_hostname setting
### Response:
def _get_hostname(self, hostname, metric):
"""
If hostname is None, look at label_to_hostname setting
"""
if hostname is None and self.label_to_hostname is not None:
for label in metric.label:
if label.name == self.label_to_hostname:
return label.value + self.label_to_hostname_suffix
return hostname |
def ParseVideoRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a Video row.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    event_data = KodiVideoEventData()
    event_data.filename = self._GetRowValue(query_hash, row, 'strFilename')
    event_data.play_count = self._GetRowValue(query_hash, row, 'playCount')
    event_data.query = query
    # 'lastPlayed' is stored as a date/time string; parse it into time elements.
    timestamp = self._GetRowValue(query_hash, row, 'lastPlayed')
    date_time = dfdatetime_time_elements.TimeElements()
    date_time.CopyFromDateTimeString(timestamp)
    # Emit the playback as a "last visited" timestamped event.
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a Video row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row. | Below is the the instruction that describes the task:
### Input:
Parses a Video row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
### Response:
def ParseVideoRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a Video row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = KodiVideoEventData()
event_data.filename = self._GetRowValue(query_hash, row, 'strFilename')
event_data.play_count = self._GetRowValue(query_hash, row, 'playCount')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'lastPlayed')
date_time = dfdatetime_time_elements.TimeElements()
date_time.CopyFromDateTimeString(timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def get_features(self):
        """Get the features contained in `self`.
        :return: the list of features.
        :returntype: `list` of `unicode`"""
        # The XPath context is preconfigured with the 'd' namespace prefix.
        l = self.xpath_ctxt.xpathEval("d:feature")
        ret = []
        for f in l:
            # Only <feature/> elements carrying a 'var' attribute count.
            if f.hasProp("var"):
                ret.append( f.prop("var").decode("utf-8") )
        return ret | Get the features contained in `self`.
:return: the list of features.
:returntype: `list` of `unicode` | Below is the the instruction that describes the task:
### Input:
Get the features contained in `self`.
:return: the list of features.
:returntype: `list` of `unicode`
### Response:
def get_features(self):
"""Get the features contained in `self`.
:return: the list of features.
:returntype: `list` of `unicode`"""
l = self.xpath_ctxt.xpathEval("d:feature")
ret = []
for f in l:
if f.hasProp("var"):
ret.append( f.prop("var").decode("utf-8") )
return ret |
def start(self):
        """starts behaviour in the event loop"""
        # Submit the behaviour's startup coroutine to the agent's loop and
        # flag it as running immediately (on submission, not completion).
        self.agent.submit(self._start())
        self.is_running = True | starts behaviour in the event loop | Below is the the instruction that describes the task:
### Input:
starts behaviour in the event loop
### Response:
def start(self):
"""starts behaviour in the event loop"""
self.agent.submit(self._start())
self.is_running = True |
def connection_made(self, transport):
        """Called when the connection is made"""
        self.logger.info('Connection made at object %s', id(self))
        self.transport = transport
        self.keepalive = True
        # If an inactivity timeout is configured, arm it now.
        # NOTE(review): the attribute is spelled '_timout_handle' (sic); other
        # code presumably reads it under that name, so it must be kept.
        if self._timeout:
            self.logger.debug('Registering timeout event')
            self._timout_handle = self._loop.call_later(
                self._timeout, self._handle_timeout) | Called when the connection is made | Below is the the instruction that describes the task:
### Input:
Called when the connection is made
### Response:
def connection_made(self, transport):
"""Called when the connection is made"""
self.logger.info('Connection made at object %s', id(self))
self.transport = transport
self.keepalive = True
if self._timeout:
self.logger.debug('Registering timeout event')
self._timout_handle = self._loop.call_later(
self._timeout, self._handle_timeout) |
def __should_warn_on_redef(
    ctx: GeneratorContext, defsym: sym.Symbol, safe_name: str, def_meta: lmap.Map
) -> bool:
    """Return True if the compiler should emit a warning about this name being redefined."""
    # ^:no-warn-on-redef metadata on the new definition suppresses the warning.
    no_warn_on_redef = def_meta.entry(SYM_NO_WARN_ON_REDEF_META_KEY, False)
    if no_warn_on_redef:
        return False
    elif safe_name in ctx.current_ns.module.__dict__:
        # The munged name already exists in the namespace's backing Python module.
        return True
    elif defsym in ctx.current_ns.interns:
        var = ctx.current_ns.find(defsym)
        assert var is not None, f"Var {defsym} cannot be none here"
        # Vars explicitly marked ^:redef may be redefined without a warning.
        if var.meta is not None and var.meta.entry(SYM_REDEF_META_KEY):
            return False
        elif var.is_bound:
            # Only warn when the existing Var actually holds a value.
            return True
        else:
            return False
    else:
        return False | Return True if the compiler should emit a warning about this name being redefined. | Below is the the instruction that describes the task:
### Input:
Return True if the compiler should emit a warning about this name being redefined.
### Response:
def __should_warn_on_redef(
ctx: GeneratorContext, defsym: sym.Symbol, safe_name: str, def_meta: lmap.Map
) -> bool:
"""Return True if the compiler should emit a warning about this name being redefined."""
no_warn_on_redef = def_meta.entry(SYM_NO_WARN_ON_REDEF_META_KEY, False)
if no_warn_on_redef:
return False
elif safe_name in ctx.current_ns.module.__dict__:
return True
elif defsym in ctx.current_ns.interns:
var = ctx.current_ns.find(defsym)
assert var is not None, f"Var {defsym} cannot be none here"
if var.meta is not None and var.meta.entry(SYM_REDEF_META_KEY):
return False
elif var.is_bound:
return True
else:
return False
else:
return False |
def find_idx_by_threshold(self, threshold):
        """
        Retrieve the index in this metric's threshold list at which the given threshold is located.
        :param threshold: Find the index of this input threshold.
        :returns: the index
        :raises ValueError: if no such index can be found.
        """
        assert_is_type(threshold, numeric)
        thresh2d = self._metric_json['thresholds_and_metric_scores']
        # First pass: relative-tolerance match against each stored threshold.
        for i, e in enumerate(thresh2d.cell_values):
            t = float(e[0])
            if abs(t - threshold) < 1e-8 * max(t, threshold):
                return i
        # No exact match: for any valid probability, fall back to the closest
        # stored threshold and tell the user which one was chosen.
        if 0 <= threshold <= 1:
            thresholds = [float(e[0]) for i, e in enumerate(thresh2d.cell_values)]
            threshold_diffs = [abs(t - threshold) for t in thresholds]
            closest_idx = threshold_diffs.index(min(threshold_diffs))
            closest_threshold = thresholds[closest_idx]
            print("Could not find exact threshold {0}; using closest threshold found {1}."
                  .format(threshold, closest_threshold))
            return closest_idx
        raise ValueError("Threshold must be between 0 and 1, but got {0} ".format(threshold)) | Retrieve the index in this metric's threshold list at which the given threshold is located.
:param threshold: Find the index of this input threshold.
:returns: the index
:raises ValueError: if no such index can be found. | Below is the the instruction that describes the task:
### Input:
Retrieve the index in this metric's threshold list at which the given threshold is located.
:param threshold: Find the index of this input threshold.
:returns: the index
:raises ValueError: if no such index can be found.
### Response:
def find_idx_by_threshold(self, threshold):
"""
Retrieve the index in this metric's threshold list at which the given threshold is located.
:param threshold: Find the index of this input threshold.
:returns: the index
:raises ValueError: if no such index can be found.
"""
assert_is_type(threshold, numeric)
thresh2d = self._metric_json['thresholds_and_metric_scores']
for i, e in enumerate(thresh2d.cell_values):
t = float(e[0])
if abs(t - threshold) < 1e-8 * max(t, threshold):
return i
if 0 <= threshold <= 1:
thresholds = [float(e[0]) for i, e in enumerate(thresh2d.cell_values)]
threshold_diffs = [abs(t - threshold) for t in thresholds]
closest_idx = threshold_diffs.index(min(threshold_diffs))
closest_threshold = thresholds[closest_idx]
print("Could not find exact threshold {0}; using closest threshold found {1}."
.format(threshold, closest_threshold))
return closest_idx
raise ValueError("Threshold must be between 0 and 1, but got {0} ".format(threshold)) |
def convert_svhn_format_1(directory, output_directory,
                          output_filename='svhn_format_1.hdf5'):
    """Converts the SVHN dataset (format 1) to HDF5.
    This method assumes the existence of the files
    `{train,test,extra}.tar.gz`, which are accessible through the
    official website [SVHNSITE].
    .. [SVHNSITE] http://ufldl.stanford.edu/housenumbers/
    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'svhn_format_1.hdf5'.
    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    try:
        output_path = os.path.join(output_directory, output_filename)
        h5file = h5py.File(output_path, mode='w')
        # Scratch directory for the extracted archives; removed in the
        # finally clause below.
        TMPDIR = tempfile.mkdtemp()
        # Every image has three channels (RGB) and variable height and width.
        # It features a variable number of bounding boxes that identify the
        # location and label of digits. The bounding box location is specified
        # using the x and y coordinates of its top left corner along with its
        # width and height.
        BoundingBoxes = namedtuple(
            'BoundingBoxes', ['labels', 'heights', 'widths', 'lefts', 'tops'])
        sources = ('features',) + tuple('bbox_{}'.format(field)
                                        for field in BoundingBoxes._fields)
        source_dtypes = dict([(source, 'uint8') for source in sources[:2]] +
                             [(source, 'uint16') for source in sources[2:]])
        source_axis_labels = {
            'features': ('channel', 'height', 'width'),
            'bbox_labels': ('bounding_box', 'index'),
            'bbox_heights': ('bounding_box', 'height'),
            'bbox_widths': ('bounding_box', 'width'),
            'bbox_lefts': ('bounding_box', 'x'),
            'bbox_tops': ('bounding_box', 'y')}
        # The dataset is split into three sets: the training set, the test set
        # and an extra set of examples that are somewhat less difficult but
        # can be used as extra training data. These sets are stored separately
        # as 'train.tar.gz', 'test.tar.gz' and 'extra.tar.gz'. Each file
        # contains a directory named after the split it stores. The examples
        # are stored in that directory as PNG images. The directory also
        # contains a 'digitStruct.mat' file with all the bounding box and
        # label information.
        splits = ('train', 'test', 'extra')
        file_paths = dict(zip(splits, FORMAT_1_FILES))
        for split, path in file_paths.items():
            file_paths[split] = os.path.join(directory, path)
        digit_struct_paths = dict(
            [(split, os.path.join(TMPDIR, split, 'digitStruct.mat'))
             for split in splits])
        # We first extract the data files in a temporary directory. While doing
        # that, we also count the number of examples for each split. Files are
        # extracted individually, which allows to display a progress bar. Since
        # the splits will be concatenated in the HDF5 file, we also compute the
        # start and stop intervals of each split within the concatenated array.
        def extract_tar(split):
            with tarfile.open(file_paths[split], 'r:gz') as f:
                members = f.getmembers()
                num_examples = sum(1 for m in members if '.png' in m.name)
                progress_bar_context = progress_bar(
                    name='{} file'.format(split), maxval=len(members),
                    prefix='Extracting')
                with progress_bar_context as bar:
                    for i, member in enumerate(members):
                        f.extract(member, path=TMPDIR)
                        bar.update(i)
            return num_examples
        examples_per_split = OrderedDict(
            [(split, extract_tar(split)) for split in splits])
        cumulative_num_examples = numpy.cumsum(
            [0] + list(examples_per_split.values()))
        num_examples = cumulative_num_examples[-1]
        intervals = zip(cumulative_num_examples[:-1],
                        cumulative_num_examples[1:])
        split_intervals = dict(zip(splits, intervals))
        # The start and stop indices are used to create a split dict that will
        # be parsed into the split array required by the H5PYDataset interface.
        # The split dict is organized as follows:
        #
        #     dict(split -> dict(source -> (start, stop)))
        #
        split_dict = OrderedDict([
            (split, OrderedDict([(s, split_intervals[split])
                                 for s in sources]))
            for split in splits])
        h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
        # We then prepare the HDF5 dataset. This involves creating datasets to
        # store data sources and datasets to store auxiliary information
        # (namely the shapes for variable-length axes, and labels to indicate
        # what these variable-length axes represent).
        def make_vlen_dataset(source):
            # Create a variable-length 1D dataset
            dtype = h5py.special_dtype(vlen=numpy.dtype(source_dtypes[source]))
            dataset = h5file.create_dataset(
                source, (num_examples,), dtype=dtype)
            # Create a dataset to store variable-length shapes.
            axis_labels = source_axis_labels[source]
            dataset_shapes = h5file.create_dataset(
                '{}_shapes'.format(source), (num_examples, len(axis_labels)),
                dtype='uint16')
            # Create a dataset to store labels for variable-length axes.
            dataset_vlen_axis_labels = h5file.create_dataset(
                '{}_vlen_axis_labels'.format(source), (len(axis_labels),),
                dtype='S{}'.format(
                    numpy.max([len(label) for label in axis_labels])))
            # Fill variable-length axis labels
            dataset_vlen_axis_labels[...] = [
                label.encode('utf8') for label in axis_labels]
            # Attach auxiliary datasets as dimension scales of the
            # variable-length 1D dataset. This is in accordance with the
            # H5PYDataset interface.
            dataset.dims.create_scale(dataset_shapes, 'shapes')
            dataset.dims[0].attach_scale(dataset_shapes)
            dataset.dims.create_scale(dataset_vlen_axis_labels, 'shape_labels')
            dataset.dims[0].attach_scale(dataset_vlen_axis_labels)
            # Tag fixed-length axis with its label
            dataset.dims[0].label = 'batch'
        for source in sources:
            make_vlen_dataset(source)
        # The "fun" part begins: we extract the bounding box and label
        # information contained in 'digitStruct.mat'. This is a version 7.3
        # Matlab file, which uses HDF5 under the hood, albeit with a very
        # convoluted layout.
        def get_boxes(split):
            boxes = []
            with h5py.File(digit_struct_paths[split], 'r') as f:
                bar_name = '{} digitStruct'.format(split)
                bar_maxval = examples_per_split[split]
                with progress_bar(bar_name, bar_maxval) as bar:
                    for image_number in range(examples_per_split[split]):
                        # The 'digitStruct' group is the main group of the HDF5
                        # file. It contains two datasets: 'bbox' and 'name'.
                        # The 'name' dataset isn't of interest to us, as it
                        # stores file names and there's already a one-to-one
                        # mapping between row numbers and image names (e.g.
                        # row 0 corresponds to '1.png', row 1 corresponds to
                        # '2.png', and so on).
                        main_group = f['digitStruct']
                        # The 'bbox' dataset contains the bounding box and
                        # label information we're after. It has as many rows
                        # as there are images, and one column. Elements of the
                        # 'bbox' dataset are object references that point to
                        # (yet another) group that contains the information
                        # for the corresponding image.
                        image_reference = main_group['bbox'][image_number, 0]
                        # There are five datasets contained in that group:
                        # 'label', 'height', 'width', 'left' and 'top'. Each of
                        # those datasets has as many rows as there are bounding
                        # boxes in the corresponding image, and one column.
                        def get_dataset(name):
                            return main_group[image_reference][name][:, 0]
                        names = ('label', 'height', 'width', 'left', 'top')
                        datasets = dict(
                            [(name, get_dataset(name)) for name in names])
                        # If there is only one bounding box, the information is
                        # stored directly in the datasets. If there are
                        # multiple bounding boxes, elements of those datasets
                        # are object references pointing to 1x1 datasets that
                        # store the information (fortunately, it's the last
                        # hop we need to make).
                        def get_elements(dataset):
                            if len(dataset) > 1:
                                return [int(main_group[reference][0, 0])
                                        for reference in dataset]
                            else:
                                return [int(dataset[0])]
                        # Names are pluralized in the BoundingBox named tuple.
                        kwargs = dict(
                            [(name + 's', get_elements(dataset))
                             for name, dataset in iteritems(datasets)])
                        boxes.append(BoundingBoxes(**kwargs))
                        if bar:
                            bar.update(image_number)
            return boxes
        split_boxes = dict([(split, get_boxes(split)) for split in splits])
        # The final step is to fill the HDF5 file.
        def fill_split(split, bar=None):
            for image_number in range(examples_per_split[split]):
                image_path = os.path.join(
                    TMPDIR, split, '{}.png'.format(image_number + 1))
                image = numpy.asarray(
                    Image.open(image_path)).transpose(2, 0, 1)
                bounding_boxes = split_boxes[split][image_number]
                num_boxes = len(bounding_boxes.labels)
                index = image_number + split_intervals[split][0]
                h5file['features'][index] = image.flatten()
                h5file['features'].dims[0]['shapes'][index] = image.shape
                for field in BoundingBoxes._fields:
                    name = 'bbox_{}'.format(field)
                    h5file[name][index] = numpy.maximum(0,
                                                        getattr(bounding_boxes,
                                                                field))
                    h5file[name].dims[0]['shapes'][index] = [num_boxes, 1]
                # Replace label '10' with '0'.
                labels = h5file['bbox_labels'][index]
                labels[labels == 10] = 0
                h5file['bbox_labels'][index] = labels
                if image_number % 1000 == 0:
                    h5file.flush()
                if bar:
                    bar.update(index)
        # Stream every example of every split into the HDF5 datasets.
        with progress_bar('SVHN format 1', num_examples) as bar:
            for split in splits:
                fill_split(split, bar=bar)
    finally:
        # Clean up even on failure: drop scratch space, flush and close the
        # (possibly partial) output file.
        if 'TMPDIR' in locals() and os.path.isdir(TMPDIR):
            shutil.rmtree(TMPDIR)
        if 'h5file' in locals():
            h5file.flush()
            h5file.close()
    return (output_path,) | Converts the SVHN dataset (format 1) to HDF5.
This method assumes the existence of the files
`{train,test,extra}.tar.gz`, which are accessible through the
official website [SVHNSITE].
.. [SVHNSITE] http://ufldl.stanford.edu/housenumbers/
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'svhn_format_1.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset. | Below is the the instruction that describes the task:
### Input:
Converts the SVHN dataset (format 1) to HDF5.
This method assumes the existence of the files
`{train,test,extra}.tar.gz`, which are accessible through the
official website [SVHNSITE].
.. [SVHNSITE] http://ufldl.stanford.edu/housenumbers/
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'svhn_format_1.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
### Response:
def convert_svhn_format_1(directory, output_directory,
                          output_filename='svhn_format_1.hdf5'):
    """Converts the SVHN dataset (format 1) to HDF5.
    This method assumes the existence of the files
    `{train,test,extra}.tar.gz`, which are accessible through the
    official website [SVHNSITE].
    .. [SVHNSITE] http://ufldl.stanford.edu/housenumbers/
    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'svhn_format_1.hdf5'.
    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    try:
        output_path = os.path.join(output_directory, output_filename)
        h5file = h5py.File(output_path, mode='w')
        # Scratch directory for the extracted archives; removed in the
        # finally clause below.
        TMPDIR = tempfile.mkdtemp()
        # Every image has three channels (RGB) and variable height and width.
        # It features a variable number of bounding boxes that identify the
        # location and label of digits. The bounding box location is specified
        # using the x and y coordinates of its top left corner along with its
        # width and height.
        BoundingBoxes = namedtuple(
            'BoundingBoxes', ['labels', 'heights', 'widths', 'lefts', 'tops'])
        sources = ('features',) + tuple('bbox_{}'.format(field)
                                        for field in BoundingBoxes._fields)
        source_dtypes = dict([(source, 'uint8') for source in sources[:2]] +
                             [(source, 'uint16') for source in sources[2:]])
        source_axis_labels = {
            'features': ('channel', 'height', 'width'),
            'bbox_labels': ('bounding_box', 'index'),
            'bbox_heights': ('bounding_box', 'height'),
            'bbox_widths': ('bounding_box', 'width'),
            'bbox_lefts': ('bounding_box', 'x'),
            'bbox_tops': ('bounding_box', 'y')}
        # The dataset is split into three sets: the training set, the test set
        # and an extra set of examples that are somewhat less difficult but
        # can be used as extra training data. These sets are stored separately
        # as 'train.tar.gz', 'test.tar.gz' and 'extra.tar.gz'. Each file
        # contains a directory named after the split it stores. The examples
        # are stored in that directory as PNG images. The directory also
        # contains a 'digitStruct.mat' file with all the bounding box and
        # label information.
        splits = ('train', 'test', 'extra')
        file_paths = dict(zip(splits, FORMAT_1_FILES))
        for split, path in file_paths.items():
            file_paths[split] = os.path.join(directory, path)
        digit_struct_paths = dict(
            [(split, os.path.join(TMPDIR, split, 'digitStruct.mat'))
             for split in splits])
        # We first extract the data files in a temporary directory. While doing
        # that, we also count the number of examples for each split. Files are
        # extracted individually, which allows to display a progress bar. Since
        # the splits will be concatenated in the HDF5 file, we also compute the
        # start and stop intervals of each split within the concatenated array.
        def extract_tar(split):
            with tarfile.open(file_paths[split], 'r:gz') as f:
                members = f.getmembers()
                num_examples = sum(1 for m in members if '.png' in m.name)
                progress_bar_context = progress_bar(
                    name='{} file'.format(split), maxval=len(members),
                    prefix='Extracting')
                with progress_bar_context as bar:
                    for i, member in enumerate(members):
                        f.extract(member, path=TMPDIR)
                        bar.update(i)
            return num_examples
        examples_per_split = OrderedDict(
            [(split, extract_tar(split)) for split in splits])
        cumulative_num_examples = numpy.cumsum(
            [0] + list(examples_per_split.values()))
        num_examples = cumulative_num_examples[-1]
        intervals = zip(cumulative_num_examples[:-1],
                        cumulative_num_examples[1:])
        split_intervals = dict(zip(splits, intervals))
        # The start and stop indices are used to create a split dict that will
        # be parsed into the split array required by the H5PYDataset interface.
        # The split dict is organized as follows:
        #
        #     dict(split -> dict(source -> (start, stop)))
        #
        split_dict = OrderedDict([
            (split, OrderedDict([(s, split_intervals[split])
                                 for s in sources]))
            for split in splits])
        h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
        # We then prepare the HDF5 dataset. This involves creating datasets to
        # store data sources and datasets to store auxiliary information
        # (namely the shapes for variable-length axes, and labels to indicate
        # what these variable-length axes represent).
        def make_vlen_dataset(source):
            # Create a variable-length 1D dataset
            dtype = h5py.special_dtype(vlen=numpy.dtype(source_dtypes[source]))
            dataset = h5file.create_dataset(
                source, (num_examples,), dtype=dtype)
            # Create a dataset to store variable-length shapes.
            axis_labels = source_axis_labels[source]
            dataset_shapes = h5file.create_dataset(
                '{}_shapes'.format(source), (num_examples, len(axis_labels)),
                dtype='uint16')
            # Create a dataset to store labels for variable-length axes.
            dataset_vlen_axis_labels = h5file.create_dataset(
                '{}_vlen_axis_labels'.format(source), (len(axis_labels),),
                dtype='S{}'.format(
                    numpy.max([len(label) for label in axis_labels])))
            # Fill variable-length axis labels
            dataset_vlen_axis_labels[...] = [
                label.encode('utf8') for label in axis_labels]
            # Attach auxiliary datasets as dimension scales of the
            # variable-length 1D dataset. This is in accordance with the
            # H5PYDataset interface.
            dataset.dims.create_scale(dataset_shapes, 'shapes')
            dataset.dims[0].attach_scale(dataset_shapes)
            dataset.dims.create_scale(dataset_vlen_axis_labels, 'shape_labels')
            dataset.dims[0].attach_scale(dataset_vlen_axis_labels)
            # Tag fixed-length axis with its label
            dataset.dims[0].label = 'batch'
        for source in sources:
            make_vlen_dataset(source)
        # The "fun" part begins: we extract the bounding box and label
        # information contained in 'digitStruct.mat'. This is a version 7.3
        # Matlab file, which uses HDF5 under the hood, albeit with a very
        # convoluted layout.
        def get_boxes(split):
            boxes = []
            with h5py.File(digit_struct_paths[split], 'r') as f:
                bar_name = '{} digitStruct'.format(split)
                bar_maxval = examples_per_split[split]
                with progress_bar(bar_name, bar_maxval) as bar:
                    for image_number in range(examples_per_split[split]):
                        # The 'digitStruct' group is the main group of the HDF5
                        # file. It contains two datasets: 'bbox' and 'name'.
                        # The 'name' dataset isn't of interest to us, as it
                        # stores file names and there's already a one-to-one
                        # mapping between row numbers and image names (e.g.
                        # row 0 corresponds to '1.png', row 1 corresponds to
                        # '2.png', and so on).
                        main_group = f['digitStruct']
                        # The 'bbox' dataset contains the bounding box and
                        # label information we're after. It has as many rows
                        # as there are images, and one column. Elements of the
                        # 'bbox' dataset are object references that point to
                        # (yet another) group that contains the information
                        # for the corresponding image.
                        image_reference = main_group['bbox'][image_number, 0]
                        # There are five datasets contained in that group:
                        # 'label', 'height', 'width', 'left' and 'top'. Each of
                        # those datasets has as many rows as there are bounding
                        # boxes in the corresponding image, and one column.
                        def get_dataset(name):
                            return main_group[image_reference][name][:, 0]
                        names = ('label', 'height', 'width', 'left', 'top')
                        datasets = dict(
                            [(name, get_dataset(name)) for name in names])
                        # If there is only one bounding box, the information is
                        # stored directly in the datasets. If there are
                        # multiple bounding boxes, elements of those datasets
                        # are object references pointing to 1x1 datasets that
                        # store the information (fortunately, it's the last
                        # hop we need to make).
                        def get_elements(dataset):
                            if len(dataset) > 1:
                                return [int(main_group[reference][0, 0])
                                        for reference in dataset]
                            else:
                                return [int(dataset[0])]
                        # Names are pluralized in the BoundingBox named tuple.
                        kwargs = dict(
                            [(name + 's', get_elements(dataset))
                             for name, dataset in iteritems(datasets)])
                        boxes.append(BoundingBoxes(**kwargs))
                        if bar:
                            bar.update(image_number)
            return boxes
        split_boxes = dict([(split, get_boxes(split)) for split in splits])
        # The final step is to fill the HDF5 file.
        def fill_split(split, bar=None):
            for image_number in range(examples_per_split[split]):
                image_path = os.path.join(
                    TMPDIR, split, '{}.png'.format(image_number + 1))
                image = numpy.asarray(
                    Image.open(image_path)).transpose(2, 0, 1)
                bounding_boxes = split_boxes[split][image_number]
                num_boxes = len(bounding_boxes.labels)
                index = image_number + split_intervals[split][0]
                h5file['features'][index] = image.flatten()
                h5file['features'].dims[0]['shapes'][index] = image.shape
                for field in BoundingBoxes._fields:
                    name = 'bbox_{}'.format(field)
                    h5file[name][index] = numpy.maximum(0,
                                                        getattr(bounding_boxes,
                                                                field))
                    h5file[name].dims[0]['shapes'][index] = [num_boxes, 1]
                # Replace label '10' with '0'.
                labels = h5file['bbox_labels'][index]
                labels[labels == 10] = 0
                h5file['bbox_labels'][index] = labels
                if image_number % 1000 == 0:
                    h5file.flush()
                if bar:
                    bar.update(index)
        # Stream every example of every split into the HDF5 datasets.
        with progress_bar('SVHN format 1', num_examples) as bar:
            for split in splits:
                fill_split(split, bar=bar)
    finally:
        # Clean up even on failure: drop scratch space, flush and close the
        # (possibly partial) output file.
        if 'TMPDIR' in locals() and os.path.isdir(TMPDIR):
            shutil.rmtree(TMPDIR)
        if 'h5file' in locals():
            h5file.flush()
            h5file.close()
    return (output_path,) |
def merge(args):
    """
    %prog merge gffiles
    Merge several gff files into one. When only one file is given, it is assumed
    to be a file with a list of gff files.
    """
    p = OptionParser(merge.__doc__)
    p.add_option("--seq", default=False, action="store_true",
                 help="Print FASTA sequences at the end")
    p.set_outfile()
    opts, args = p.parse_args(args)
    nargs = len(args)
    if nargs < 1:
        sys.exit(not p.print_help())
    if nargs == 1:
        # A single argument names a file listing the GFF files, one per line.
        listfile, = args
        # NOTE(review): 'fp' handles in this function are never closed;
        # they rely on garbage collection / interpreter exit.
        fp = open(listfile)
        gffiles = [x.strip() for x in fp]
    else:
        gffiles = args
    outfile = opts.outfile
    deflines = set()
    fw = must_open(outfile, "w")
    fastarecs = {}
    for gffile in natsorted(gffiles, key=lambda x: op.basename(x)):
        logging.debug(gffile)
        fp = open(gffile)
        for row in fp:
            row = row.rstrip()
            if not row or row[0] == '#':
                # Stop at the embedded FASTA section, if any.
                if row == FastaTag:
                    break
            # Deduplicate lines (headers and features) across merged inputs.
            if row in deflines:
                continue
            else:
                deflines.add(row)
            print(row, file=fw)
        if not opts.seq:
            continue
        # Collect FASTA records once per sequence id for the final section.
        f = Fasta(gffile, lazy=True)
        for key, rec in f.iteritems_ordered():
            if key in fastarecs:
                continue
            fastarecs[key] = rec
    if opts.seq:
        print(FastaTag, file=fw)
        SeqIO.write(fastarecs.values(), fw, "fasta")
    fw.close() | %prog merge gffiles
Merge several gff files into one. When only one file is given, it is assumed
to be a file with a list of gff files. | Below is the the instruction that describes the task:
### Input:
%prog merge gffiles
Merge several gff files into one. When only one file is given, it is assumed
to be a file with a list of gff files.
### Response:
def merge(args):
"""
%prog merge gffiles
Merge several gff files into one. When only one file is given, it is assumed
to be a file with a list of gff files.
"""
p = OptionParser(merge.__doc__)
p.add_option("--seq", default=False, action="store_true",
help="Print FASTA sequences at the end")
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
if nargs < 1:
sys.exit(not p.print_help())
if nargs == 1:
listfile, = args
fp = open(listfile)
gffiles = [x.strip() for x in fp]
else:
gffiles = args
outfile = opts.outfile
deflines = set()
fw = must_open(outfile, "w")
fastarecs = {}
for gffile in natsorted(gffiles, key=lambda x: op.basename(x)):
logging.debug(gffile)
fp = open(gffile)
for row in fp:
row = row.rstrip()
if not row or row[0] == '#':
if row == FastaTag:
break
if row in deflines:
continue
else:
deflines.add(row)
print(row, file=fw)
if not opts.seq:
continue
f = Fasta(gffile, lazy=True)
for key, rec in f.iteritems_ordered():
if key in fastarecs:
continue
fastarecs[key] = rec
if opts.seq:
print(FastaTag, file=fw)
SeqIO.write(fastarecs.values(), fw, "fasta")
fw.close() |
def rsync_upload():
    """
    Uploads the project with rsync excluding some files and folders.
    """
    # Skip bytecode, databases, OS/editor cruft, coverage data, machine-local
    # settings, collected static files and VCS metadata.
    excludes = ["*.pyc", "*.pyo", "*.db", ".DS_Store", ".coverage",
                "local_settings.py", "/static", "/.git", "/.hg"]
    # Trailing separator makes rsync copy the directory contents, not the
    # directory itself.
    local_dir = os.getcwd() + os.sep
    return rsync_project(remote_dir=env.proj_path, local_dir=local_dir,
                         exclude=excludes) | Uploads the project with rsync excluding some files and folders. | Below is the the instruction that describes the task:
### Input:
Uploads the project with rsync excluding some files and folders.
### Response:
def rsync_upload():
"""
Uploads the project with rsync excluding some files and folders.
"""
excludes = ["*.pyc", "*.pyo", "*.db", ".DS_Store", ".coverage",
"local_settings.py", "/static", "/.git", "/.hg"]
local_dir = os.getcwd() + os.sep
return rsync_project(remote_dir=env.proj_path, local_dir=local_dir,
exclude=excludes) |
def calculate_dimensions(image, long_side, short_side):
"""Returns the thumbnail dimensions depending on the images format."""
if image.width >= image.height:
return '{0}x{1}'.format(long_side, short_side)
return '{0}x{1}'.format(short_side, long_side) | Returns the thumbnail dimensions depending on the images format. | Below is the the instruction that describes the task:
### Input:
Returns the thumbnail dimensions depending on the images format.
### Response:
def calculate_dimensions(image, long_side, short_side):
"""Returns the thumbnail dimensions depending on the images format."""
if image.width >= image.height:
return '{0}x{1}'.format(long_side, short_side)
return '{0}x{1}'.format(short_side, long_side) |
def is_storage(self):
"""
Return true if the variable is located in storage
See https://solidity.readthedocs.io/en/v0.4.24/types.html?highlight=storage%20location#data-location
Returns:
(bool)
"""
if self.location == 'memory':
return False
# Use by slithIR SSA
if self.location == 'reference_to_storage':
return False
if self.location == 'storage':
return True
if isinstance(self.type, (ArrayType, MappingType)):
return True
if isinstance(self.type, UserDefinedType):
return isinstance(self.type.type, Structure)
return False | Return true if the variable is located in storage
See https://solidity.readthedocs.io/en/v0.4.24/types.html?highlight=storage%20location#data-location
Returns:
(bool) | Below is the the instruction that describes the task:
### Input:
Return true if the variable is located in storage
See https://solidity.readthedocs.io/en/v0.4.24/types.html?highlight=storage%20location#data-location
Returns:
(bool)
### Response:
def is_storage(self):
"""
Return true if the variable is located in storage
See https://solidity.readthedocs.io/en/v0.4.24/types.html?highlight=storage%20location#data-location
Returns:
(bool)
"""
if self.location == 'memory':
return False
# Use by slithIR SSA
if self.location == 'reference_to_storage':
return False
if self.location == 'storage':
return True
if isinstance(self.type, (ArrayType, MappingType)):
return True
if isinstance(self.type, UserDefinedType):
return isinstance(self.type.type, Structure)
return False |
def fpy_interface_sub(fpy, dtype, kind, suffix=None):
"""Generates the interface for reading/writing values for variables of the specified suffix
for dimensions 0 through 7.
:arg fpy: the generating function that accepts the dimension and type information
and returns the (xnames, subtext).
:arg suffix: one of [None, "_f", "_p"] corresponding to "allocatable", fixed dimension,
and "pointer" type variables.
"""
allsubs = []
xnames = []
drange = list(range(8)) if suffix is None else list(range(1,8))
for D in drange:
xname, sub = fpy(D, dtype, kind, suffix)
xnames.append(xname)
allsubs.append(sub)
return (xnames, '\n'.join(allsubs)) | Generates the interface for reading/writing values for variables of the specified suffix
for dimensions 0 through 7.
:arg fpy: the generating function that accepts the dimension and type information
and returns the (xnames, subtext).
:arg suffix: one of [None, "_f", "_p"] corresponding to "allocatable", fixed dimension,
and "pointer" type variables. | Below is the the instruction that describes the task:
### Input:
Generates the interface for reading/writing values for variables of the specified suffix
for dimensions 0 through 7.
:arg fpy: the generating function that accepts the dimension and type information
and returns the (xnames, subtext).
:arg suffix: one of [None, "_f", "_p"] corresponding to "allocatable", fixed dimension,
and "pointer" type variables.
### Response:
def fpy_interface_sub(fpy, dtype, kind, suffix=None):
"""Generates the interface for reading/writing values for variables of the specified suffix
for dimensions 0 through 7.
:arg fpy: the generating function that accepts the dimension and type information
and returns the (xnames, subtext).
:arg suffix: one of [None, "_f", "_p"] corresponding to "allocatable", fixed dimension,
and "pointer" type variables.
"""
allsubs = []
xnames = []
drange = list(range(8)) if suffix is None else list(range(1,8))
for D in drange:
xname, sub = fpy(D, dtype, kind, suffix)
xnames.append(xname)
allsubs.append(sub)
return (xnames, '\n'.join(allsubs)) |
def _group_unique(self, clusters, adj_list, counts):
''' return groups for unique method'''
if len(clusters) == 1:
groups = [clusters]
else:
groups = [[x] for x in clusters]
return groups | return groups for unique method | Below is the the instruction that describes the task:
### Input:
return groups for unique method
### Response:
def _group_unique(self, clusters, adj_list, counts):
''' return groups for unique method'''
if len(clusters) == 1:
groups = [clusters]
else:
groups = [[x] for x in clusters]
return groups |
def find_bp(self, filename, lineno, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary breakpoint.
"""
possibles = self.bplist[filename, lineno]
for i in range(0, len(possibles)):
b = possibles[i]
if not b.enabled:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits += 1
if not b.condition:
# If unconditional, and ignoring, go on to next, else
# break
if b.ignore > 0:
b.ignore = b.ignore -1
continue
else:
# breakpoint and marker that's ok to delete if
# temporary
return (b, True)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.condition, frame.f_globals, frame.f_locals)
if val:
if b.ignore > 0:
b.ignore = b.ignore -1
# continue
else:
return (b, True)
# else:
# continue
except:
# if eval fails, most conservative thing is to
# stop on breakpoint regardless of ignore count.
# Don't delete temporary, as another hint to user.
return (b, False)
pass
pass
return (None, None) | Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary breakpoint. | Below is the the instruction that describes the task:
### Input:
Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary breakpoint.
### Response:
def find_bp(self, filename, lineno, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary breakpoint.
"""
possibles = self.bplist[filename, lineno]
for i in range(0, len(possibles)):
b = possibles[i]
if not b.enabled:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits += 1
if not b.condition:
# If unconditional, and ignoring, go on to next, else
# break
if b.ignore > 0:
b.ignore = b.ignore -1
continue
else:
# breakpoint and marker that's ok to delete if
# temporary
return (b, True)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.condition, frame.f_globals, frame.f_locals)
if val:
if b.ignore > 0:
b.ignore = b.ignore -1
# continue
else:
return (b, True)
# else:
# continue
except:
# if eval fails, most conservative thing is to
# stop on breakpoint regardless of ignore count.
# Don't delete temporary, as another hint to user.
return (b, False)
pass
pass
return (None, None) |
def element(self, inp=None):
"""Return a complex number from ``inp`` or from scratch."""
if inp is not None:
# Workaround for missing __complex__ of numpy.ndarray
# for Numpy version < 1.12
# TODO: remove when Numpy >= 1.12 is required
if isinstance(inp, np.ndarray):
return complex(inp.reshape([1])[0])
else:
return complex(inp)
else:
return complex(0.0, 0.0) | Return a complex number from ``inp`` or from scratch. | Below is the the instruction that describes the task:
### Input:
Return a complex number from ``inp`` or from scratch.
### Response:
def element(self, inp=None):
"""Return a complex number from ``inp`` or from scratch."""
if inp is not None:
# Workaround for missing __complex__ of numpy.ndarray
# for Numpy version < 1.12
# TODO: remove when Numpy >= 1.12 is required
if isinstance(inp, np.ndarray):
return complex(inp.reshape([1])[0])
else:
return complex(inp)
else:
return complex(0.0, 0.0) |
def _set_original_fields(instance):
"""
Save fields value, only for non-m2m fields.
"""
original_fields = {}
def _set_original_field(instance, field):
if instance.pk is None:
original_fields[field] = None
else:
if isinstance(instance._meta.get_field(field), ForeignKey):
# Only get the PK, we don't want to get the object
# (which would make an additional request)
original_fields[field] = getattr(instance,
'{0}_id'.format(field))
else:
original_fields[field] = getattr(instance, field)
for field in getattr(instance, '_tracked_fields', []):
_set_original_field(instance, field)
for field in getattr(instance, '_tracked_related_fields', {}).keys():
_set_original_field(instance, field)
instance._original_fields = original_fields
# Include pk to detect the creation of an object
instance._original_fields['pk'] = instance.pk | Save fields value, only for non-m2m fields. | Below is the the instruction that describes the task:
### Input:
Save fields value, only for non-m2m fields.
### Response:
def _set_original_fields(instance):
"""
Save fields value, only for non-m2m fields.
"""
original_fields = {}
def _set_original_field(instance, field):
if instance.pk is None:
original_fields[field] = None
else:
if isinstance(instance._meta.get_field(field), ForeignKey):
# Only get the PK, we don't want to get the object
# (which would make an additional request)
original_fields[field] = getattr(instance,
'{0}_id'.format(field))
else:
original_fields[field] = getattr(instance, field)
for field in getattr(instance, '_tracked_fields', []):
_set_original_field(instance, field)
for field in getattr(instance, '_tracked_related_fields', {}).keys():
_set_original_field(instance, field)
instance._original_fields = original_fields
# Include pk to detect the creation of an object
instance._original_fields['pk'] = instance.pk |
def add_or_renew_pool(self, host, is_host_addition):
"""
For internal use only.
"""
distance = self._profile_manager.distance(host)
if distance == HostDistance.IGNORED:
return None
def run_add_or_renew_pool():
try:
if self._protocol_version >= 3:
new_pool = HostConnection(host, distance, self)
else:
# TODO remove host pool again ???
new_pool = HostConnectionPool(host, distance, self)
except AuthenticationFailed as auth_exc:
conn_exc = ConnectionException(str(auth_exc), host=host)
self.cluster.signal_connection_failure(host, conn_exc, is_host_addition)
return False
except Exception as conn_exc:
log.warning("Failed to create connection pool for new host %s:",
host, exc_info=conn_exc)
# the host itself will still be marked down, so we need to pass
# a special flag to make sure the reconnector is created
self.cluster.signal_connection_failure(
host, conn_exc, is_host_addition, expect_host_to_be_down=True)
return False
previous = self._pools.get(host)
with self._lock:
while new_pool._keyspace != self.keyspace:
self._lock.release()
set_keyspace_event = Event()
errors_returned = []
def callback(pool, errors):
errors_returned.extend(errors)
set_keyspace_event.set()
new_pool._set_keyspace_for_all_conns(self.keyspace, callback)
set_keyspace_event.wait(self.cluster.connect_timeout)
if not set_keyspace_event.is_set() or errors_returned:
log.warning("Failed setting keyspace for pool after keyspace changed during connect: %s", errors_returned)
self.cluster.on_down(host, is_host_addition)
new_pool.shutdown()
self._lock.acquire()
return False
self._lock.acquire()
self._pools[host] = new_pool
log.debug("Added pool for host %s to session", host)
if previous:
previous.shutdown()
return True
return self.submit(run_add_or_renew_pool) | For internal use only. | Below is the the instruction that describes the task:
### Input:
For internal use only.
### Response:
def add_or_renew_pool(self, host, is_host_addition):
"""
For internal use only.
"""
distance = self._profile_manager.distance(host)
if distance == HostDistance.IGNORED:
return None
def run_add_or_renew_pool():
try:
if self._protocol_version >= 3:
new_pool = HostConnection(host, distance, self)
else:
# TODO remove host pool again ???
new_pool = HostConnectionPool(host, distance, self)
except AuthenticationFailed as auth_exc:
conn_exc = ConnectionException(str(auth_exc), host=host)
self.cluster.signal_connection_failure(host, conn_exc, is_host_addition)
return False
except Exception as conn_exc:
log.warning("Failed to create connection pool for new host %s:",
host, exc_info=conn_exc)
# the host itself will still be marked down, so we need to pass
# a special flag to make sure the reconnector is created
self.cluster.signal_connection_failure(
host, conn_exc, is_host_addition, expect_host_to_be_down=True)
return False
previous = self._pools.get(host)
with self._lock:
while new_pool._keyspace != self.keyspace:
self._lock.release()
set_keyspace_event = Event()
errors_returned = []
def callback(pool, errors):
errors_returned.extend(errors)
set_keyspace_event.set()
new_pool._set_keyspace_for_all_conns(self.keyspace, callback)
set_keyspace_event.wait(self.cluster.connect_timeout)
if not set_keyspace_event.is_set() or errors_returned:
log.warning("Failed setting keyspace for pool after keyspace changed during connect: %s", errors_returned)
self.cluster.on_down(host, is_host_addition)
new_pool.shutdown()
self._lock.acquire()
return False
self._lock.acquire()
self._pools[host] = new_pool
log.debug("Added pool for host %s to session", host)
if previous:
previous.shutdown()
return True
return self.submit(run_add_or_renew_pool) |
def get_emb_szs(self, sz_dict=None):
"Return the default embedding sizes suitable for this data or takes the ones in `sz_dict`."
return [def_emb_sz(self.classes, n, sz_dict) for n in self.cat_names] | Return the default embedding sizes suitable for this data or takes the ones in `sz_dict`. | Below is the the instruction that describes the task:
### Input:
Return the default embedding sizes suitable for this data or takes the ones in `sz_dict`.
### Response:
def get_emb_szs(self, sz_dict=None):
"Return the default embedding sizes suitable for this data or takes the ones in `sz_dict`."
return [def_emb_sz(self.classes, n, sz_dict) for n in self.cat_names] |
def mode(self):
"""Get alarm mode."""
mode = self.get_value('mode').get(self.device_id, None)
return mode.lower() | Get alarm mode. | Below is the the instruction that describes the task:
### Input:
Get alarm mode.
### Response:
def mode(self):
"""Get alarm mode."""
mode = self.get_value('mode').get(self.device_id, None)
return mode.lower() |
def from_dict(d: Dict[str, Any]) -> 'CoverageInstructions':
"""
Loads a set of coverage instructions from a given dictionary.
Raises:
BadCoverageInstructions: if the given coverage instructions are
illegal.
"""
name_type = d['type']
cls = _NAME_TO_INSTRUCTIONS[name_type]
return cls.from_dict(d) | Loads a set of coverage instructions from a given dictionary.
Raises:
BadCoverageInstructions: if the given coverage instructions are
illegal. | Below is the the instruction that describes the task:
### Input:
Loads a set of coverage instructions from a given dictionary.
Raises:
BadCoverageInstructions: if the given coverage instructions are
illegal.
### Response:
def from_dict(d: Dict[str, Any]) -> 'CoverageInstructions':
"""
Loads a set of coverage instructions from a given dictionary.
Raises:
BadCoverageInstructions: if the given coverage instructions are
illegal.
"""
name_type = d['type']
cls = _NAME_TO_INSTRUCTIONS[name_type]
return cls.from_dict(d) |
def get_html_color(self):
"""
get a string representing the color, using HTML notation
"""
color = self.color
return ("#%02x%02x%02x" % (
int(color.red), int(color.green), int(color.blue)
)) | get a string representing the color, using HTML notation | Below is the the instruction that describes the task:
### Input:
get a string representing the color, using HTML notation
### Response:
def get_html_color(self):
"""
get a string representing the color, using HTML notation
"""
color = self.color
return ("#%02x%02x%02x" % (
int(color.red), int(color.green), int(color.blue)
)) |
def exists(self, index):
"""Checks whether :index: exists in the Model.
:index: Index to look for.
:returns: True if :index: exists in the Model, False otherwise.
"""
data = self.data
try:
for c in self._split(index):
i = int(c) - 1
data = data[i][4]
except Exception:
return False
return True | Checks whether :index: exists in the Model.
:index: Index to look for.
:returns: True if :index: exists in the Model, False otherwise. | Below is the the instruction that describes the task:
### Input:
Checks whether :index: exists in the Model.
:index: Index to look for.
:returns: True if :index: exists in the Model, False otherwise.
### Response:
def exists(self, index):
"""Checks whether :index: exists in the Model.
:index: Index to look for.
:returns: True if :index: exists in the Model, False otherwise.
"""
data = self.data
try:
for c in self._split(index):
i = int(c) - 1
data = data[i][4]
except Exception:
return False
return True |
def get_gpio_mode(self, gpio_id):
"""
Return the gpio mode for gpio :gpio_id:.
@gpio_id Character A or B.
"""
if not self._connected:
return
return self._protocol.status.get("OTGW_GPIO_{}".format(gpio_id)) | Return the gpio mode for gpio :gpio_id:.
@gpio_id Character A or B. | Below is the the instruction that describes the task:
### Input:
Return the gpio mode for gpio :gpio_id:.
@gpio_id Character A or B.
### Response:
def get_gpio_mode(self, gpio_id):
"""
Return the gpio mode for gpio :gpio_id:.
@gpio_id Character A or B.
"""
if not self._connected:
return
return self._protocol.status.get("OTGW_GPIO_{}".format(gpio_id)) |
def create_resource(self, resource_type=None, uri=None):
'''
Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
'''
if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]:
return resource_type(self, uri)
else:
raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource") | Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type | Below is the the instruction that describes the task:
### Input:
Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
### Response:
def create_resource(self, resource_type=None, uri=None):
'''
Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
'''
if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]:
return resource_type(self, uri)
else:
raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource") |
def raise_stmt__26(self, raise_loc, type_opt):
"""(2.6, 2.7) raise_stmt: 'raise' [test [',' test [',' test]]]"""
type_ = inst = tback = None
loc = raise_loc
if type_opt:
type_, inst_opt = type_opt
loc = loc.join(type_.loc)
if inst_opt:
_, inst, tback = inst_opt
loc = loc.join(inst.loc)
if tback:
loc = loc.join(tback.loc)
return ast.Raise(exc=type_, inst=inst, tback=tback, cause=None,
keyword_loc=raise_loc, from_loc=None, loc=loc) | (2.6, 2.7) raise_stmt: 'raise' [test [',' test [',' test]]] | Below is the the instruction that describes the task:
### Input:
(2.6, 2.7) raise_stmt: 'raise' [test [',' test [',' test]]]
### Response:
def raise_stmt__26(self, raise_loc, type_opt):
"""(2.6, 2.7) raise_stmt: 'raise' [test [',' test [',' test]]]"""
type_ = inst = tback = None
loc = raise_loc
if type_opt:
type_, inst_opt = type_opt
loc = loc.join(type_.loc)
if inst_opt:
_, inst, tback = inst_opt
loc = loc.join(inst.loc)
if tback:
loc = loc.join(tback.loc)
return ast.Raise(exc=type_, inst=inst, tback=tback, cause=None,
keyword_loc=raise_loc, from_loc=None, loc=loc) |
def shift_peaks(sig, peak_inds, search_radius, peak_up):
"""
Helper function for correct_peaks. Return the shifted peaks to local
maxima or minima within a radius.
peak_up : bool
Whether the expected peak direction is up
"""
sig_len = sig.shape[0]
n_peaks = len(peak_inds)
# The indices to shift each peak ind by
shift_inds = np.zeros(n_peaks, dtype='int')
# Iterate through peaks
for i in range(n_peaks):
ind = peak_inds[i]
local_sig = sig[max(0, ind - search_radius):min(ind + search_radius, sig_len-1)]
if peak_up:
shift_inds[i] = np.argmax(local_sig)
else:
shift_inds[i] = np.argmin(local_sig)
# May have to adjust early values
for i in range(n_peaks):
ind = peak_inds[i]
if ind >= search_radius:
break
shift_inds[i] -= search_radius - ind
shifted_peak_inds = peak_inds + shift_inds - search_radius
return shifted_peak_inds | Helper function for correct_peaks. Return the shifted peaks to local
maxima or minima within a radius.
peak_up : bool
Whether the expected peak direction is up | Below is the the instruction that describes the task:
### Input:
Helper function for correct_peaks. Return the shifted peaks to local
maxima or minima within a radius.
peak_up : bool
Whether the expected peak direction is up
### Response:
def shift_peaks(sig, peak_inds, search_radius, peak_up):
"""
Helper function for correct_peaks. Return the shifted peaks to local
maxima or minima within a radius.
peak_up : bool
Whether the expected peak direction is up
"""
sig_len = sig.shape[0]
n_peaks = len(peak_inds)
# The indices to shift each peak ind by
shift_inds = np.zeros(n_peaks, dtype='int')
# Iterate through peaks
for i in range(n_peaks):
ind = peak_inds[i]
local_sig = sig[max(0, ind - search_radius):min(ind + search_radius, sig_len-1)]
if peak_up:
shift_inds[i] = np.argmax(local_sig)
else:
shift_inds[i] = np.argmin(local_sig)
# May have to adjust early values
for i in range(n_peaks):
ind = peak_inds[i]
if ind >= search_radius:
break
shift_inds[i] -= search_radius - ind
shifted_peak_inds = peak_inds + shift_inds - search_radius
return shifted_peak_inds |
def _unique_name(self, basename, ext, existing, force=False):
"""
Find a unique basename for a new file/key where existing is
either a list of (basename, ext) pairs or an absolute path to
a directory.
By default, uniqueness is enforced depending on the state of
the unique_name parameter (for export names). If force is
True, this parameter is ignored and uniqueness is guaranteed.
"""
skip = False if force else (not self.unique_name)
if skip: return (basename, ext)
ext = '' if ext is None else ext
if isinstance(existing, str):
split = [os.path.splitext(el)
for el in os.listdir(os.path.abspath(existing))]
existing = [(n, ex if not ex else ex[1:]) for (n, ex) in split]
new_name, counter = basename, 1
while (new_name, ext) in existing:
new_name = basename+'-'+str(counter)
counter += 1
return (sanitizer(new_name), ext) | Find a unique basename for a new file/key where existing is
either a list of (basename, ext) pairs or an absolute path to
a directory.
By default, uniqueness is enforced depending on the state of
the unique_name parameter (for export names). If force is
True, this parameter is ignored and uniqueness is guaranteed. | Below is the the instruction that describes the task:
### Input:
Find a unique basename for a new file/key where existing is
either a list of (basename, ext) pairs or an absolute path to
a directory.
By default, uniqueness is enforced depending on the state of
the unique_name parameter (for export names). If force is
True, this parameter is ignored and uniqueness is guaranteed.
### Response:
def _unique_name(self, basename, ext, existing, force=False):
"""
Find a unique basename for a new file/key where existing is
either a list of (basename, ext) pairs or an absolute path to
a directory.
By default, uniqueness is enforced depending on the state of
the unique_name parameter (for export names). If force is
True, this parameter is ignored and uniqueness is guaranteed.
"""
skip = False if force else (not self.unique_name)
if skip: return (basename, ext)
ext = '' if ext is None else ext
if isinstance(existing, str):
split = [os.path.splitext(el)
for el in os.listdir(os.path.abspath(existing))]
existing = [(n, ex if not ex else ex[1:]) for (n, ex) in split]
new_name, counter = basename, 1
while (new_name, ext) in existing:
new_name = basename+'-'+str(counter)
counter += 1
return (sanitizer(new_name), ext) |
def find(command, on):
"""Find the command usage."""
output_lines = parse_man_page(command, on)
click.echo(''.join(output_lines)) | Find the command usage. | Below is the the instruction that describes the task:
### Input:
Find the command usage.
### Response:
def find(command, on):
"""Find the command usage."""
output_lines = parse_man_page(command, on)
click.echo(''.join(output_lines)) |
def obtain_all_devices(my_devices):
"""Dynamically create 'all' group."""
new_devices = {}
for device_name, device_or_group in my_devices.items():
# Skip any groups
if not isinstance(device_or_group, list):
new_devices[device_name] = device_or_group
return new_devices | Dynamically create 'all' group. | Below is the the instruction that describes the task:
### Input:
Dynamically create 'all' group.
### Response:
def obtain_all_devices(my_devices):
"""Dynamically create 'all' group."""
new_devices = {}
for device_name, device_or_group in my_devices.items():
# Skip any groups
if not isinstance(device_or_group, list):
new_devices[device_name] = device_or_group
return new_devices |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.