code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def emit(self,prob_min=0.0,prob_max=1.0):
"""
m.emit(,prob_min=0.0,prob_max=1.0) -- Consider motif as a generative model, and have it emit a sequence
"""
if not self.cumP:
for logcol in self.logP:
tups = []
for L in ACGT:
p = math.pow(2,logcol[L])
tups.append((p,L))
tups.sort()
cumu = []
tot = 0
for p,L in tups:
tot = tot + p
cumu.append((tot,L))
self.cumP.append(cumu)
s = []
#u = random()+0.01 #Can make higher for more consistent motifs
u = (prob_max-prob_min)*random() + prob_min
for cumu in self.cumP:
#u = random()+0.01 #Can make higher for more consistent motifs
last = 0
for p,L in cumu:
if last < u and u <= p:
letter = L
break
else: last = p
# print L,'%8.4f'%u,cumu
s.append(L)
#print ''.join(s)
return ''.join(s) | m.emit(,prob_min=0.0,prob_max=1.0) -- Consider motif as a generative model, and have it emit a sequence | Below is the the instruction that describes the task:
### Input:
m.emit(,prob_min=0.0,prob_max=1.0) -- Consider motif as a generative model, and have it emit a sequence
### Response:
def emit(self,prob_min=0.0,prob_max=1.0):
"""
m.emit(,prob_min=0.0,prob_max=1.0) -- Consider motif as a generative model, and have it emit a sequence
"""
if not self.cumP:
for logcol in self.logP:
tups = []
for L in ACGT:
p = math.pow(2,logcol[L])
tups.append((p,L))
tups.sort()
cumu = []
tot = 0
for p,L in tups:
tot = tot + p
cumu.append((tot,L))
self.cumP.append(cumu)
s = []
#u = random()+0.01 #Can make higher for more consistent motifs
u = (prob_max-prob_min)*random() + prob_min
for cumu in self.cumP:
#u = random()+0.01 #Can make higher for more consistent motifs
last = 0
for p,L in cumu:
if last < u and u <= p:
letter = L
break
else: last = p
# print L,'%8.4f'%u,cumu
s.append(L)
#print ''.join(s)
return ''.join(s) |
def to_mono(y):
'''Force an audio signal down to mono.
Parameters
----------
y : np.ndarray [shape=(2,n) or shape=(n,)]
audio time series, either stereo or mono
Returns
-------
y_mono : np.ndarray [shape=(n,)]
`y` as a monophonic time-series
Notes
-----
This function caches at level 20.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(), mono=False)
>>> y.shape
(2, 1355168)
>>> y_mono = librosa.to_mono(y)
>>> y_mono.shape
(1355168,)
'''
# Validate the buffer. Stereo is ok here.
util.valid_audio(y, mono=False)
if y.ndim > 1:
y = np.mean(y, axis=0)
return y | Force an audio signal down to mono.
Parameters
----------
y : np.ndarray [shape=(2,n) or shape=(n,)]
audio time series, either stereo or mono
Returns
-------
y_mono : np.ndarray [shape=(n,)]
`y` as a monophonic time-series
Notes
-----
This function caches at level 20.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(), mono=False)
>>> y.shape
(2, 1355168)
>>> y_mono = librosa.to_mono(y)
>>> y_mono.shape
(1355168,) | Below is the the instruction that describes the task:
### Input:
Force an audio signal down to mono.
Parameters
----------
y : np.ndarray [shape=(2,n) or shape=(n,)]
audio time series, either stereo or mono
Returns
-------
y_mono : np.ndarray [shape=(n,)]
`y` as a monophonic time-series
Notes
-----
This function caches at level 20.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(), mono=False)
>>> y.shape
(2, 1355168)
>>> y_mono = librosa.to_mono(y)
>>> y_mono.shape
(1355168,)
### Response:
def to_mono(y):
'''Force an audio signal down to mono.
Parameters
----------
y : np.ndarray [shape=(2,n) or shape=(n,)]
audio time series, either stereo or mono
Returns
-------
y_mono : np.ndarray [shape=(n,)]
`y` as a monophonic time-series
Notes
-----
This function caches at level 20.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(), mono=False)
>>> y.shape
(2, 1355168)
>>> y_mono = librosa.to_mono(y)
>>> y_mono.shape
(1355168,)
'''
# Validate the buffer. Stereo is ok here.
util.valid_audio(y, mono=False)
if y.ndim > 1:
y = np.mean(y, axis=0)
return y |
def measured_current(self):
"""
The measured current that the battery is supplying (in microamps)
"""
self._measured_current, value = self.get_attr_int(self._measured_current, 'current_now')
return value | The measured current that the battery is supplying (in microamps) | Below is the the instruction that describes the task:
### Input:
The measured current that the battery is supplying (in microamps)
### Response:
def measured_current(self):
"""
The measured current that the battery is supplying (in microamps)
"""
self._measured_current, value = self.get_attr_int(self._measured_current, 'current_now')
return value |
def update_from_dict(self, attribute_dict):
"""
Method overriden from the base class
"""
if 'template' in attribute_dict:
self.update_from_dict(attribute_dict['template'])
setattr(self,
self.Meta.id_field, attribute_dict['template']['name'])
return
try:
for k, v in attribute_dict.items():
setattr(self, k, v)
except Exception:
setattr(self, self.Meta.id_field, attribute_dict) | Method overriden from the base class | Below is the the instruction that describes the task:
### Input:
Method overriden from the base class
### Response:
def update_from_dict(self, attribute_dict):
"""
Method overriden from the base class
"""
if 'template' in attribute_dict:
self.update_from_dict(attribute_dict['template'])
setattr(self,
self.Meta.id_field, attribute_dict['template']['name'])
return
try:
for k, v in attribute_dict.items():
setattr(self, k, v)
except Exception:
setattr(self, self.Meta.id_field, attribute_dict) |
def my_init(self):
"""
Method automatically called from base class constructor.
"""
self._start_time = time.time()
self._stats = {}
self._stats_lock = threading.Lock() | Method automatically called from base class constructor. | Below is the the instruction that describes the task:
### Input:
Method automatically called from base class constructor.
### Response:
def my_init(self):
"""
Method automatically called from base class constructor.
"""
self._start_time = time.time()
self._stats = {}
self._stats_lock = threading.Lock() |
def SETGE(cpu, dest):
"""
Sets byte if greater or equal.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.SF == cpu.OF, 1, 0)) | Sets byte if greater or equal.
:param cpu: current CPU.
:param dest: destination operand. | Below is the the instruction that describes the task:
### Input:
Sets byte if greater or equal.
:param cpu: current CPU.
:param dest: destination operand.
### Response:
def SETGE(cpu, dest):
"""
Sets byte if greater or equal.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.SF == cpu.OF, 1, 0)) |
def _friendlyAuthError(fn):
''' Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.error('insufficient permission')
elif e.response.status_code == requests.codes.bad and 'jwt has expired' in e.response.text.lower(): #pylint: disable=no-member
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
logger.error('Check that your system clock is set accurately!')
else:
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
raise
return wrapped | Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login. | Below is the the instruction that describes the task:
### Input:
Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login.
### Response:
def _friendlyAuthError(fn):
''' Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.error('insufficient permission')
elif e.response.status_code == requests.codes.bad and 'jwt has expired' in e.response.text.lower(): #pylint: disable=no-member
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
logger.error('Check that your system clock is set accurately!')
else:
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
raise
return wrapped |
def rgb_to_vector(image):
"""
Convert an RGB ANTsImage to a Vector ANTsImage
Arguments
---------
image : ANTsImage
RGB image to be converted
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni_rgb = mni.scalar_to_rgb()
>>> mni_vector = mni.rgb_to_vector()
>>> mni_rgb2 = mni.vector_to_rgb()
"""
if image.pixeltype != 'unsigned char':
image = image.clone('unsigned char')
idim = image.dimension
libfn = utils.get_lib_fn('RgbToVector%i' % idim)
new_ptr = libfn(image.pointer)
new_img = iio.ANTsImage(pixeltype=image.pixeltype, dimension=image.dimension,
components=3, pointer=new_ptr, is_rgb=False)
return new_img | Convert an RGB ANTsImage to a Vector ANTsImage
Arguments
---------
image : ANTsImage
RGB image to be converted
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni_rgb = mni.scalar_to_rgb()
>>> mni_vector = mni.rgb_to_vector()
>>> mni_rgb2 = mni.vector_to_rgb() | Below is the the instruction that describes the task:
### Input:
Convert an RGB ANTsImage to a Vector ANTsImage
Arguments
---------
image : ANTsImage
RGB image to be converted
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni_rgb = mni.scalar_to_rgb()
>>> mni_vector = mni.rgb_to_vector()
>>> mni_rgb2 = mni.vector_to_rgb()
### Response:
def rgb_to_vector(image):
"""
Convert an RGB ANTsImage to a Vector ANTsImage
Arguments
---------
image : ANTsImage
RGB image to be converted
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni_rgb = mni.scalar_to_rgb()
>>> mni_vector = mni.rgb_to_vector()
>>> mni_rgb2 = mni.vector_to_rgb()
"""
if image.pixeltype != 'unsigned char':
image = image.clone('unsigned char')
idim = image.dimension
libfn = utils.get_lib_fn('RgbToVector%i' % idim)
new_ptr = libfn(image.pointer)
new_img = iio.ANTsImage(pixeltype=image.pixeltype, dimension=image.dimension,
components=3, pointer=new_ptr, is_rgb=False)
return new_img |
def connect(self):
"""Connects to the Graphite server if not already connected."""
if self.sock is not None:
return
backoff = 0.01
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((self.host, self.port))
self.sock = sock
return
except socket.error:
time.sleep(random.uniform(0, 2.0*backoff))
backoff = min(backoff*2.0, 5.0) | Connects to the Graphite server if not already connected. | Below is the the instruction that describes the task:
### Input:
Connects to the Graphite server if not already connected.
### Response:
def connect(self):
"""Connects to the Graphite server if not already connected."""
if self.sock is not None:
return
backoff = 0.01
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((self.host, self.port))
self.sock = sock
return
except socket.error:
time.sleep(random.uniform(0, 2.0*backoff))
backoff = min(backoff*2.0, 5.0) |
def tree_words(node):
"""Return all the significant text below the given node as a list of words.
>>> list(tree_words(parse_minidom('<h1>one</h1> two <div>three<em>four</em></div>')))
['one', 'two', 'three', 'four']
"""
for word in split_text(tree_text(node)):
word = word.strip()
if word:
yield word | Return all the significant text below the given node as a list of words.
>>> list(tree_words(parse_minidom('<h1>one</h1> two <div>three<em>four</em></div>')))
['one', 'two', 'three', 'four'] | Below is the the instruction that describes the task:
### Input:
Return all the significant text below the given node as a list of words.
>>> list(tree_words(parse_minidom('<h1>one</h1> two <div>three<em>four</em></div>')))
['one', 'two', 'three', 'four']
### Response:
def tree_words(node):
"""Return all the significant text below the given node as a list of words.
>>> list(tree_words(parse_minidom('<h1>one</h1> two <div>three<em>four</em></div>')))
['one', 'two', 'three', 'four']
"""
for word in split_text(tree_text(node)):
word = word.strip()
if word:
yield word |
def normal_check(name, status, device_type):
"""if the status is "ok" in the NORMAL_STATE dict, return ok + string
if the status is not "ok", return critical + string"""
status_string = NORMAL_STATE.get(int(status), "unknown")
if status_string == "ok":
return ok, "{} '{}': {}".format(device_type, name, status_string)
elif status_string == "unknown":
return unknown, "{} '{}': {}".format(device_type, name, status_string)
return critical, "{} '{}': {}".format(device_type, name, status_string) | if the status is "ok" in the NORMAL_STATE dict, return ok + string
if the status is not "ok", return critical + string | Below is the the instruction that describes the task:
### Input:
if the status is "ok" in the NORMAL_STATE dict, return ok + string
if the status is not "ok", return critical + string
### Response:
def normal_check(name, status, device_type):
"""if the status is "ok" in the NORMAL_STATE dict, return ok + string
if the status is not "ok", return critical + string"""
status_string = NORMAL_STATE.get(int(status), "unknown")
if status_string == "ok":
return ok, "{} '{}': {}".format(device_type, name, status_string)
elif status_string == "unknown":
return unknown, "{} '{}': {}".format(device_type, name, status_string)
return critical, "{} '{}': {}".format(device_type, name, status_string) |
def _write_cache(self, lines, append=False):
"""Write virtualenv metadata to cache."""
mode = 'at' if append else 'wt'
with open(self.filepath, mode, encoding='utf8') as fh:
fh.writelines(line + '\n' for line in lines) | Write virtualenv metadata to cache. | Below is the the instruction that describes the task:
### Input:
Write virtualenv metadata to cache.
### Response:
def _write_cache(self, lines, append=False):
"""Write virtualenv metadata to cache."""
mode = 'at' if append else 'wt'
with open(self.filepath, mode, encoding='utf8') as fh:
fh.writelines(line + '\n' for line in lines) |
def raise_or_lock(self, key, timeout):
"""
Checks if the task is locked and raises an exception, else locks
the task. By default, the tasks and the key expire after 60 minutes.
(meaning it will not be executed and the lock will clear).
"""
acquired = Lock(
self.redis,
key,
timeout=timeout,
blocking=self.blocking,
blocking_timeout=self.blocking_timeout
).acquire()
if not acquired:
# Time remaining in milliseconds
# https://redis.io/commands/pttl
ttl = self.redis.pttl(key)
raise AlreadyQueued(ttl / 1000.) | Checks if the task is locked and raises an exception, else locks
the task. By default, the tasks and the key expire after 60 minutes.
(meaning it will not be executed and the lock will clear). | Below is the the instruction that describes the task:
### Input:
Checks if the task is locked and raises an exception, else locks
the task. By default, the tasks and the key expire after 60 minutes.
(meaning it will not be executed and the lock will clear).
### Response:
def raise_or_lock(self, key, timeout):
"""
Checks if the task is locked and raises an exception, else locks
the task. By default, the tasks and the key expire after 60 minutes.
(meaning it will not be executed and the lock will clear).
"""
acquired = Lock(
self.redis,
key,
timeout=timeout,
blocking=self.blocking,
blocking_timeout=self.blocking_timeout
).acquire()
if not acquired:
# Time remaining in milliseconds
# https://redis.io/commands/pttl
ttl = self.redis.pttl(key)
raise AlreadyQueued(ttl / 1000.) |
def get_pourbaix_entries(self, chemsys):
"""
A helper function to get all entries necessary to generate
a pourbaix diagram from the rest interface.
Args:
chemsys ([str]): A list of elements comprising the chemical
system, e.g. ['Li', 'Fe']
"""
from pymatgen.analysis.pourbaix_diagram import PourbaixEntry, IonEntry
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.core.ion import Ion
from pymatgen.entries.compatibility import \
MaterialsProjectAqueousCompatibility
pbx_entries = []
# Get ion entries first, because certain ions have reference
# solids that aren't necessarily in the chemsys (Na2SO4)
url = '/pourbaix_diagram/reference_data/' + '-'.join(chemsys)
ion_data = self._make_request(url)
ion_ref_comps = [Composition(d['Reference Solid']) for d in ion_data]
ion_ref_elts = list(itertools.chain.from_iterable(
i.elements for i in ion_ref_comps))
ion_ref_entries = self.get_entries_in_chemsys(
list(set([str(e) for e in ion_ref_elts] + ['O', 'H'])),
property_data=['e_above_hull'], compatible_only=False)
compat = MaterialsProjectAqueousCompatibility("Advanced")
ion_ref_entries = compat.process_entries(ion_ref_entries)
ion_ref_pd = PhaseDiagram(ion_ref_entries)
# position the ion energies relative to most stable reference state
for n, i_d in enumerate(ion_data):
ion_entry = IonEntry(Ion.from_formula(i_d['Name']), i_d['Energy'])
refs = [e for e in ion_ref_entries
if e.composition.reduced_formula == i_d['Reference Solid']]
if not refs:
raise ValueError("Reference solid not contained in entry list")
stable_ref = sorted(refs, key=lambda x: x.data['e_above_hull'])[0]
rf = stable_ref.composition.get_reduced_composition_and_factor()[1]
solid_diff = ion_ref_pd.get_form_energy(stable_ref) \
- i_d['Reference solid energy'] * rf
elt = i_d['Major_Elements'][0]
correction_factor = ion_entry.ion.composition[elt] \
/ stable_ref.composition[elt]
ion_entry.energy += solid_diff * correction_factor
pbx_entries.append(PourbaixEntry(ion_entry, 'ion-{}'.format(n)))
# Construct the solid pourbaix entries from filtered ion_ref entries
extra_elts = set(ion_ref_elts) - {Element(s) for s in chemsys} \
- {Element('H'), Element('O')}
for entry in ion_ref_entries:
entry_elts = set(entry.composition.elements)
# Ensure no OH chemsys or extraneous elements from ion references
if not (entry_elts <= {Element('H'), Element('O')} or \
extra_elts.intersection(entry_elts)):
# replace energy with formation energy, use dict to
# avoid messing with the ion_ref_pd and to keep all old params
form_e = ion_ref_pd.get_form_energy(entry)
new_entry = deepcopy(entry)
new_entry.uncorrected_energy = form_e
new_entry.correction = 0.0
pbx_entry = PourbaixEntry(new_entry)
pbx_entries.append(pbx_entry)
return pbx_entries | A helper function to get all entries necessary to generate
a pourbaix diagram from the rest interface.
Args:
chemsys ([str]): A list of elements comprising the chemical
system, e.g. ['Li', 'Fe'] | Below is the the instruction that describes the task:
### Input:
A helper function to get all entries necessary to generate
a pourbaix diagram from the rest interface.
Args:
chemsys ([str]): A list of elements comprising the chemical
system, e.g. ['Li', 'Fe']
### Response:
def get_pourbaix_entries(self, chemsys):
"""
A helper function to get all entries necessary to generate
a pourbaix diagram from the rest interface.
Args:
chemsys ([str]): A list of elements comprising the chemical
system, e.g. ['Li', 'Fe']
"""
from pymatgen.analysis.pourbaix_diagram import PourbaixEntry, IonEntry
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.core.ion import Ion
from pymatgen.entries.compatibility import \
MaterialsProjectAqueousCompatibility
pbx_entries = []
# Get ion entries first, because certain ions have reference
# solids that aren't necessarily in the chemsys (Na2SO4)
url = '/pourbaix_diagram/reference_data/' + '-'.join(chemsys)
ion_data = self._make_request(url)
ion_ref_comps = [Composition(d['Reference Solid']) for d in ion_data]
ion_ref_elts = list(itertools.chain.from_iterable(
i.elements for i in ion_ref_comps))
ion_ref_entries = self.get_entries_in_chemsys(
list(set([str(e) for e in ion_ref_elts] + ['O', 'H'])),
property_data=['e_above_hull'], compatible_only=False)
compat = MaterialsProjectAqueousCompatibility("Advanced")
ion_ref_entries = compat.process_entries(ion_ref_entries)
ion_ref_pd = PhaseDiagram(ion_ref_entries)
# position the ion energies relative to most stable reference state
for n, i_d in enumerate(ion_data):
ion_entry = IonEntry(Ion.from_formula(i_d['Name']), i_d['Energy'])
refs = [e for e in ion_ref_entries
if e.composition.reduced_formula == i_d['Reference Solid']]
if not refs:
raise ValueError("Reference solid not contained in entry list")
stable_ref = sorted(refs, key=lambda x: x.data['e_above_hull'])[0]
rf = stable_ref.composition.get_reduced_composition_and_factor()[1]
solid_diff = ion_ref_pd.get_form_energy(stable_ref) \
- i_d['Reference solid energy'] * rf
elt = i_d['Major_Elements'][0]
correction_factor = ion_entry.ion.composition[elt] \
/ stable_ref.composition[elt]
ion_entry.energy += solid_diff * correction_factor
pbx_entries.append(PourbaixEntry(ion_entry, 'ion-{}'.format(n)))
# Construct the solid pourbaix entries from filtered ion_ref entries
extra_elts = set(ion_ref_elts) - {Element(s) for s in chemsys} \
- {Element('H'), Element('O')}
for entry in ion_ref_entries:
entry_elts = set(entry.composition.elements)
# Ensure no OH chemsys or extraneous elements from ion references
if not (entry_elts <= {Element('H'), Element('O')} or \
extra_elts.intersection(entry_elts)):
# replace energy with formation energy, use dict to
# avoid messing with the ion_ref_pd and to keep all old params
form_e = ion_ref_pd.get_form_energy(entry)
new_entry = deepcopy(entry)
new_entry.uncorrected_energy = form_e
new_entry.correction = 0.0
pbx_entry = PourbaixEntry(new_entry)
pbx_entries.append(pbx_entry)
return pbx_entries |
def read(self, n=None):
"""Read at most *n* characters from this stream.
If *n* is ``None``, return all available characters.
"""
response = b""
while len(self.streams) > 0 and (n is None or n > 0):
txt = self.streams[0].read(n)
response += txt
if n is not None:
n -= len(txt)
if n is None or n > 0:
del self.streams[0]
return response | Read at most *n* characters from this stream.
If *n* is ``None``, return all available characters. | Below is the the instruction that describes the task:
### Input:
Read at most *n* characters from this stream.
If *n* is ``None``, return all available characters.
### Response:
def read(self, n=None):
"""Read at most *n* characters from this stream.
If *n* is ``None``, return all available characters.
"""
response = b""
while len(self.streams) > 0 and (n is None or n > 0):
txt = self.streams[0].read(n)
response += txt
if n is not None:
n -= len(txt)
if n is None or n > 0:
del self.streams[0]
return response |
def new(self, event):
"""Creates a new spreadsheet. Expects code_array in event."""
# Grid table handles interaction to code_array
self.grid.actions.clear(event.shape)
_grid_table = GridTable(self.grid, self.grid.code_array)
self.grid.SetTable(_grid_table, True)
# Update toolbars
self.grid.update_entry_line()
self.grid.update_attribute_toolbar() | Creates a new spreadsheet. Expects code_array in event. | Below is the the instruction that describes the task:
### Input:
Creates a new spreadsheet. Expects code_array in event.
### Response:
def new(self, event):
"""Creates a new spreadsheet. Expects code_array in event."""
# Grid table handles interaction to code_array
self.grid.actions.clear(event.shape)
_grid_table = GridTable(self.grid, self.grid.code_array)
self.grid.SetTable(_grid_table, True)
# Update toolbars
self.grid.update_entry_line()
self.grid.update_attribute_toolbar() |
def __setIncludes(self, schema):
"""Add dictionary of includes to schema instance.
schema -- XMLSchema instance
"""
for schemaLocation, val in schema.includes.items():
if self._includes.has_key(schemaLocation):
schema.addIncludeSchema(schemaLocation, self._imports[schemaLocation]) | Add dictionary of includes to schema instance.
schema -- XMLSchema instance | Below is the the instruction that describes the task:
### Input:
Add dictionary of includes to schema instance.
schema -- XMLSchema instance
### Response:
def __setIncludes(self, schema):
"""Add dictionary of includes to schema instance.
schema -- XMLSchema instance
"""
for schemaLocation, val in schema.includes.items():
if self._includes.has_key(schemaLocation):
schema.addIncludeSchema(schemaLocation, self._imports[schemaLocation]) |
def get_absolute_url_link(self, text=None, cls=None, icon_class=None,
**attrs):
"""Gets the html link for the object."""
if text is None:
text = self.get_link_text()
return build_link(href=self.get_absolute_url(),
text=text,
cls=cls,
icon_class=icon_class,
**attrs) | Gets the html link for the object. | Below is the the instruction that describes the task:
### Input:
Gets the html link for the object.
### Response:
def get_absolute_url_link(self, text=None, cls=None, icon_class=None,
**attrs):
"""Gets the html link for the object."""
if text is None:
text = self.get_link_text()
return build_link(href=self.get_absolute_url(),
text=text,
cls=cls,
icon_class=icon_class,
**attrs) |
def rotate_and_detach_tab_labels(self):
"""Rotates tab labels of a given notebook by 90 degrees and makes them detachable.
:param notebook: GTK Notebook container, whose tab labels are to be rotated and made detachable
"""
icons = {'Libraries': constants.SIGN_LIB, 'States Tree': constants.ICON_TREE,
'Global Variables': constants.ICON_GLOB, 'Modification History': constants.ICON_HIST,
'Execution History': constants.ICON_EHIST, 'network': constants.ICON_NET}
for notebook in self.left_bar_notebooks:
for i in range(notebook.get_n_pages()):
child = notebook.get_nth_page(i)
tab_label = notebook.get_tab_label(child)
tab_label_text = tab_label.get_text()
notebook.set_tab_label(child, gui_helper_label.create_tab_header_label(tab_label_text, icons))
notebook.set_tab_reorderable(child, True)
notebook.set_tab_detachable(child, True) | Rotates tab labels of a given notebook by 90 degrees and makes them detachable.
:param notebook: GTK Notebook container, whose tab labels are to be rotated and made detachable | Below is the the instruction that describes the task:
### Input:
Rotates tab labels of a given notebook by 90 degrees and makes them detachable.
:param notebook: GTK Notebook container, whose tab labels are to be rotated and made detachable
### Response:
def rotate_and_detach_tab_labels(self):
"""Rotates tab labels of a given notebook by 90 degrees and makes them detachable.
:param notebook: GTK Notebook container, whose tab labels are to be rotated and made detachable
"""
icons = {'Libraries': constants.SIGN_LIB, 'States Tree': constants.ICON_TREE,
'Global Variables': constants.ICON_GLOB, 'Modification History': constants.ICON_HIST,
'Execution History': constants.ICON_EHIST, 'network': constants.ICON_NET}
for notebook in self.left_bar_notebooks:
for i in range(notebook.get_n_pages()):
child = notebook.get_nth_page(i)
tab_label = notebook.get_tab_label(child)
tab_label_text = tab_label.get_text()
notebook.set_tab_label(child, gui_helper_label.create_tab_header_label(tab_label_text, icons))
notebook.set_tab_reorderable(child, True)
notebook.set_tab_detachable(child, True) |
def monitor_resource_sync_state(resource, callback, exit_event=None):
"""Coroutine that monitors a KATCPResource's sync state.
Calls callback(True/False) whenever the resource becomes synced or unsynced. Will
always do an initial callback(False) call. Exits without calling callback() if
exit_event is set
"""
exit_event = exit_event or AsyncEvent()
callback(False) # Initial condition, assume resource is not connected
while not exit_event.is_set():
# Wait for resource to be synced
yield until_any(resource.until_synced(), exit_event.until_set())
if exit_event.is_set():
break # If exit event is set we stop without calling callback
else:
callback(True)
# Wait for resource to be un-synced
yield until_any(resource.until_not_synced(), exit_event.until_set())
if exit_event.is_set():
break # If exit event is set we stop without calling callback
else:
callback(False) | Coroutine that monitors a KATCPResource's sync state.
Calls callback(True/False) whenever the resource becomes synced or unsynced. Will
always do an initial callback(False) call. Exits without calling callback() if
exit_event is set | Below is the the instruction that describes the task:
### Input:
Coroutine that monitors a KATCPResource's sync state.
Calls callback(True/False) whenever the resource becomes synced or unsynced. Will
always do an initial callback(False) call. Exits without calling callback() if
exit_event is set
### Response:
def monitor_resource_sync_state(resource, callback, exit_event=None):
"""Coroutine that monitors a KATCPResource's sync state.
Calls callback(True/False) whenever the resource becomes synced or unsynced. Will
always do an initial callback(False) call. Exits without calling callback() if
exit_event is set
"""
exit_event = exit_event or AsyncEvent()
callback(False) # Initial condition, assume resource is not connected
while not exit_event.is_set():
# Wait for resource to be synced
yield until_any(resource.until_synced(), exit_event.until_set())
if exit_event.is_set():
break # If exit event is set we stop without calling callback
else:
callback(True)
# Wait for resource to be un-synced
yield until_any(resource.until_not_synced(), exit_event.until_set())
if exit_event.is_set():
break # If exit event is set we stop without calling callback
else:
callback(False) |
def _add_boundaries(self, interval):
"""
Records the boundaries of the interval in the boundary table.
"""
begin = interval.begin
end = interval.end
if begin in self.boundary_table:
self.boundary_table[begin] += 1
else:
self.boundary_table[begin] = 1
if end in self.boundary_table:
self.boundary_table[end] += 1
else:
self.boundary_table[end] = 1 | Records the boundaries of the interval in the boundary table. | Below is the the instruction that describes the task:
### Input:
Records the boundaries of the interval in the boundary table.
### Response:
def _add_boundaries(self, interval):
"""
Records the boundaries of the interval in the boundary table.
"""
begin = interval.begin
end = interval.end
if begin in self.boundary_table:
self.boundary_table[begin] += 1
else:
self.boundary_table[begin] = 1
if end in self.boundary_table:
self.boundary_table[end] += 1
else:
self.boundary_table[end] = 1 |
def check(func):
"""
Check the permissions, http method and login state.
"""
def iCheck(request, *args, **kwargs):
if not request.method == "POST":
return HttpResponseBadRequest("Must be POST request.")
follow = func(request, *args, **kwargs)
if request.is_ajax():
return HttpResponse('ok')
try:
if 'next' in request.GET:
return HttpResponseRedirect(request.GET.get('next'))
if 'next' in request.POST:
return HttpResponseRedirect(request.POST.get('next'))
return HttpResponseRedirect(follow.target.get_absolute_url())
except (AttributeError, TypeError):
if 'HTTP_REFERER' in request.META:
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
if follow:
return HttpResponseServerError('"%s" object of type ``%s`` has no method ``get_absolute_url()``.' % (
unicode(follow.target), follow.target.__class__))
return HttpResponseServerError('No follow object and `next` parameter found.')
return iCheck | Check the permissions, http method and login state. | Below is the the instruction that describes the task:
### Input:
Check the permissions, http method and login state.
### Response:
def check(func):
    """
    Check the permissions, http method and login state.

    Decorator for follow/unfollow views. The wrapped view must be hit via
    POST and is expected to return a follow object; the response is then:
    'ok' for AJAX callers, a redirect to an explicit ``next`` parameter,
    the followed target's absolute URL, or the HTTP referer, in that order.

    NOTE(review): only the HTTP method is actually enforced here —
    permission/login checks are presumably handled elsewhere; confirm.
    """
    def iCheck(request, *args, **kwargs):
        # Only state-changing POST requests are accepted.
        if not request.method == "POST":
            return HttpResponseBadRequest("Must be POST request.")
        follow = func(request, *args, **kwargs)
        # AJAX clients only need an acknowledgement, not a redirect.
        if request.is_ajax():
            return HttpResponse('ok')
        try:
            # An explicit 'next' parameter (GET or POST) wins over the
            # target object's own URL.
            if 'next' in request.GET:
                return HttpResponseRedirect(request.GET.get('next'))
            if 'next' in request.POST:
                return HttpResponseRedirect(request.POST.get('next'))
            return HttpResponseRedirect(follow.target.get_absolute_url())
        except (AttributeError, TypeError):
            # follow may be None, or its target may lack get_absolute_url();
            # fall back to the referring page when the browser sent one.
            if 'HTTP_REFERER' in request.META:
                return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
            if follow:
                # NOTE(review): 'unicode' implies Python 2; under Python 3
                # this would need to be str.
                return HttpResponseServerError('"%s" object of type ``%s`` has no method ``get_absolute_url()``.' % (
                    unicode(follow.target), follow.target.__class__))
            return HttpResponseServerError('No follow object and `next` parameter found.')
    return iCheck
def __notify_listeners(self, data, stats):
""" notify all listeners about aggregate data and stats """
for listener in self.listeners:
listener.on_aggregated_data(data, stats) | notify all listeners about aggregate data and stats | Below is the the instruction that describes the task:
### Input:
notify all listeners about aggregate data and stats
### Response:
def __notify_listeners(self, data, stats):
    """ Push the aggregated data and its stats to every registered listener. """
    for subscriber in self.listeners:
        subscriber.on_aggregated_data(data, stats)
def ReadClientStats(self,
client_id,
min_timestamp,
max_timestamp,
cursor=None):
"""Reads ClientStats for a given client and time range."""
cursor.execute(
"""
SELECT payload FROM client_stats
WHERE client_id = %s
AND timestamp BETWEEN FROM_UNIXTIME(%s) AND FROM_UNIXTIME(%s)
ORDER BY timestamp ASC
""", [
db_utils.ClientIDToInt(client_id),
mysql_utils.RDFDatetimeToTimestamp(min_timestamp),
mysql_utils.RDFDatetimeToTimestamp(max_timestamp)
])
return [
rdf_client_stats.ClientStats.FromSerializedString(stats_bytes)
for stats_bytes, in cursor.fetchall()
] | Reads ClientStats for a given client and time range. | Below is the the instruction that describes the task:
### Input:
Reads ClientStats for a given client and time range.
### Response:
def ReadClientStats(self,
                    client_id,
                    min_timestamp,
                    max_timestamp,
                    cursor=None):
    """Reads ClientStats for a given client and time range.

    Args:
      client_id: Client id string; converted to the integer primary key.
      min_timestamp: Inclusive lower bound of the time range.
      max_timestamp: Inclusive upper bound of the time range.
      cursor: MySQL cursor. NOTE(review): presumably injected by a
        transaction decorator, since a None cursor would fail below — confirm.

    Returns:
      A list of rdf_client_stats.ClientStats, in ascending timestamp order.
    """
    # BETWEEN is inclusive on both ends; the bounds are converted from
    # unix-epoch seconds to MySQL TIMESTAMP values with FROM_UNIXTIME.
    cursor.execute(
        """
        SELECT payload FROM client_stats
        WHERE client_id = %s
        AND timestamp BETWEEN FROM_UNIXTIME(%s) AND FROM_UNIXTIME(%s)
        ORDER BY timestamp ASC
        """, [
            db_utils.ClientIDToInt(client_id),
            mysql_utils.RDFDatetimeToTimestamp(min_timestamp),
            mysql_utils.RDFDatetimeToTimestamp(max_timestamp)
        ])
    # Each fetched row is a 1-tuple holding the serialized ClientStats blob.
    return [
        rdf_client_stats.ClientStats.FromSerializedString(stats_bytes)
        for stats_bytes, in cursor.fetchall()
    ]
def speciate(self, config, population, generation):
"""
Place genomes into species by genetic similarity.
Note that this method assumes the current representatives of the species are from the old
generation, and that after speciation has been performed, the old representatives should be
dropped and replaced with representatives from the new generation. If you violate this
assumption, you should make sure other necessary parts of the code are updated to reflect
the new behavior.
"""
assert isinstance(population, dict)
compatibility_threshold = self.species_set_config.compatibility_threshold
# Find the best representatives for each existing species.
unspeciated = set(iterkeys(population))
distances = GenomeDistanceCache(config.genome_config)
new_representatives = {}
new_members = {}
for sid, s in iteritems(self.species):
candidates = []
for gid in unspeciated:
g = population[gid]
d = distances(s.representative, g)
candidates.append((d, g))
# The new representative is the genome closest to the current representative.
ignored_rdist, new_rep = min(candidates, key=lambda x: x[0])
new_rid = new_rep.key
new_representatives[sid] = new_rid
new_members[sid] = [new_rid]
unspeciated.remove(new_rid)
# Partition population into species based on genetic similarity.
while unspeciated:
gid = unspeciated.pop()
g = population[gid]
# Find the species with the most similar representative.
candidates = []
for sid, rid in iteritems(new_representatives):
rep = population[rid]
d = distances(rep, g)
if d < compatibility_threshold:
candidates.append((d, sid))
if candidates:
ignored_sdist, sid = min(candidates, key=lambda x: x[0])
new_members[sid].append(gid)
else:
# No species is similar enough, create a new species, using
# this genome as its representative.
sid = next(self.indexer)
new_representatives[sid] = gid
new_members[sid] = [gid]
# Update species collection based on new speciation.
self.genome_to_species = {}
for sid, rid in iteritems(new_representatives):
s = self.species.get(sid)
if s is None:
s = Species(sid, generation)
self.species[sid] = s
members = new_members[sid]
for gid in members:
self.genome_to_species[gid] = sid
member_dict = dict((gid, population[gid]) for gid in members)
s.update(population[rid], member_dict)
gdmean = mean(itervalues(distances.distances))
gdstdev = stdev(itervalues(distances.distances))
self.reporters.info(
'Mean genetic distance {0:.3f}, standard deviation {1:.3f}'.format(gdmean, gdstdev)) | Place genomes into species by genetic similarity.
Note that this method assumes the current representatives of the species are from the old
generation, and that after speciation has been performed, the old representatives should be
dropped and replaced with representatives from the new generation. If you violate this
assumption, you should make sure other necessary parts of the code are updated to reflect
the new behavior. | Below is the the instruction that describes the task:
### Input:
Place genomes into species by genetic similarity.
Note that this method assumes the current representatives of the species are from the old
generation, and that after speciation has been performed, the old representatives should be
dropped and replaced with representatives from the new generation. If you violate this
assumption, you should make sure other necessary parts of the code are updated to reflect
the new behavior.
### Response:
def speciate(self, config, population, generation):
    """
    Place genomes into species by genetic similarity.
    Note that this method assumes the current representatives of the species are from the old
    generation, and that after speciation has been performed, the old representatives should be
    dropped and replaced with representatives from the new generation. If you violate this
    assumption, you should make sure other necessary parts of the code are updated to reflect
    the new behavior.

    Args:
        config: configuration object whose genome_config parameterizes distance.
        population: dict mapping genome key -> genome for the new generation.
        generation: current generation number (recorded on newly created species).
    """
    assert isinstance(population, dict)
    compatibility_threshold = self.species_set_config.compatibility_threshold
    # Find the best representatives for each existing species.
    # 'distances' memoizes pairwise genome distances so each pair is
    # computed at most once across both phases below.
    unspeciated = set(iterkeys(population))
    distances = GenomeDistanceCache(config.genome_config)
    new_representatives = {}
    new_members = {}
    for sid, s in iteritems(self.species):
        candidates = []
        for gid in unspeciated:
            g = population[gid]
            d = distances(s.representative, g)
            candidates.append((d, g))
        # The new representative is the genome closest to the current representative.
        # NOTE(review): assumes at least one unspeciated genome remains for
        # every existing species; min() on an empty list would raise.
        ignored_rdist, new_rep = min(candidates, key=lambda x: x[0])
        new_rid = new_rep.key
        new_representatives[sid] = new_rid
        new_members[sid] = [new_rid]
        unspeciated.remove(new_rid)
    # Partition population into species based on genetic similarity.
    while unspeciated:
        gid = unspeciated.pop()
        g = population[gid]
        # Find the species with the most similar representative.
        candidates = []
        for sid, rid in iteritems(new_representatives):
            rep = population[rid]
            d = distances(rep, g)
            if d < compatibility_threshold:
                candidates.append((d, sid))
        if candidates:
            ignored_sdist, sid = min(candidates, key=lambda x: x[0])
            new_members[sid].append(gid)
        else:
            # No species is similar enough, create a new species, using
            # this genome as its representative.
            sid = next(self.indexer)
            new_representatives[sid] = gid
            new_members[sid] = [gid]
    # Update species collection based on new speciation.
    self.genome_to_species = {}
    for sid, rid in iteritems(new_representatives):
        s = self.species.get(sid)
        if s is None:
            # First time this species id is seen: create it now.
            s = Species(sid, generation)
            self.species[sid] = s
        members = new_members[sid]
        for gid in members:
            self.genome_to_species[gid] = sid
        member_dict = dict((gid, population[gid]) for gid in members)
        s.update(population[rid], member_dict)
    # Report summary statistics over every distance actually computed.
    gdmean = mean(itervalues(distances.distances))
    gdstdev = stdev(itervalues(distances.distances))
    self.reporters.info(
        'Mean genetic distance {0:.3f}, standard deviation {1:.3f}'.format(gdmean, gdstdev))
def _update_geography(self, countries, regions, cities, city_country_mapping):
""" Update database with new countries, regions and cities """
existing = {
'cities': list(City.objects.values_list('id', flat=True)),
'regions': list(Region.objects.values('name', 'country__code')),
'countries': Country.objects.values_list('code', flat=True)
}
for country_code in countries:
if country_code not in existing['countries']:
Country.objects.create(code=country_code, name=ISO_CODES.get(country_code, country_code))
for entry in regions:
if entry not in existing['regions']:
Region.objects.create(name=entry['name'], country_id=entry['country__code'])
for entry in cities:
if int(entry['id']) not in existing['cities']:
code = city_country_mapping.get(entry['id'])
if code:
region = Region.objects.get(name=entry['region__name'], country__code=code)
City.objects.create(id=entry['id'], name=entry['name'], region=region,
latitude=entry.get('latitude'), longitude=entry.get('longitude')) | Update database with new countries, regions and cities | Below is the the instruction that describes the task:
### Input:
Update database with new countries, regions and cities
### Response:
def _update_geography(self, countries, regions, cities, city_country_mapping):
    """ Update database with new countries, regions and cities.

    Existing rows are looked up first so only genuinely new geography
    records get created.
    """
    known_city_ids = list(City.objects.values_list('id', flat=True))
    known_regions = list(Region.objects.values('name', 'country__code'))
    known_country_codes = Country.objects.values_list('code', flat=True)
    for code in countries:
        if code not in known_country_codes:
            Country.objects.create(code=code, name=ISO_CODES.get(code, code))
    for region_info in regions:
        if region_info not in known_regions:
            Region.objects.create(name=region_info['name'], country_id=region_info['country__code'])
    for city_info in cities:
        if int(city_info['id']) not in known_city_ids:
            country_code = city_country_mapping.get(city_info['id'])
            if country_code:
                region = Region.objects.get(name=city_info['region__name'], country__code=country_code)
                City.objects.create(id=city_info['id'], name=city_info['name'], region=region,
                                    latitude=city_info.get('latitude'), longitude=city_info.get('longitude'))
def body_template(self, value):
"""
Must be an instance of a prestans.types.DataCollection subclass; this is
generally set during the RequestHandler lifecycle. Setting this spwans the
parsing process of the body. If the HTTP verb is GET an AssertionError is
thrown. Use with extreme caution.
"""
if self.method == VERB.GET:
raise AssertionError("body_template cannot be set for GET requests")
if value is None:
self.logger.warning("body_template is None, parsing will be ignored")
return
if not isinstance(value, DataCollection):
msg = "body_template must be an instance of %s.%s" % (
DataCollection.__module__,
DataCollection.__name__
)
raise AssertionError(msg)
self._body_template = value
# get a deserializer based on the Content-Type header
# do this here so the handler gets a chance to setup extra serializers
self.set_deserializer_by_mime_type(self.content_type) | Must be an instance of a prestans.types.DataCollection subclass; this is
generally set during the RequestHandler lifecycle. Setting this spawns the
parsing process of the body. If the HTTP verb is GET an AssertionError is
thrown. Use with extreme caution. | Below is the the instruction that describes the task:
### Input:
Must be an instance of a prestans.types.DataCollection subclass; this is
generally set during the RequestHandler lifecycle. Setting this spawns the
parsing process of the body. If the HTTP verb is GET an AssertionError is
thrown. Use with extreme caution.
### Response:
def body_template(self, value):
    """
    Must be an instance of a prestans.types.DataCollection subclass; this is
    generally set during the RequestHandler lifecycle. Setting this spawns the
    parsing process of the body. If the HTTP verb is GET an AssertionError is
    thrown. Use with extreme caution.
    """
    # GET requests carry no body, so a template makes no sense there.
    if self.method == VERB.GET:
        raise AssertionError("body_template cannot be set for GET requests")
    # A None template is tolerated but means the body is never parsed.
    if value is None:
        self.logger.warning("body_template is None, parsing will be ignored")
        return
    if not isinstance(value, DataCollection):
        msg = "body_template must be an instance of %s.%s" % (
            DataCollection.__module__,
            DataCollection.__name__
        )
        raise AssertionError(msg)
    self._body_template = value
    # get a deserializer based on the Content-Type header
    # do this here so the handler gets a chance to setup extra serializers
    self.set_deserializer_by_mime_type(self.content_type)
def stop(name=None, backdate=None,
unique=None, keep_subdivisions=None, quick_print=None,
un=None, ks=None, qp=None):
"""
Mark the end of timing. Optionally performs a stamp, hence accepts the
same arguments.
Notes:
If keeping subdivisions and not calling a stamp, any awaiting subdivisions
will be assigned to a special 'UNASSIGNED' position to indicate that they
are not properly accounted for in the hierarchy (these can happen at
different places and may be combined inadvertently).
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp.
Args:
name (any, optional): If used, passed to a call to stamp()
backdate (float, optional): time to use for stop instead of current
unique (bool, optional): see stamp()
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): boolean, print total time
un (bool, optional): see stamp()
ks (bool, optional): see stamp()
qp (bool, optional): see stamp()
Returns:
float: The current time.
Raises:
BackdateError: If given backdate is out of range, or if used in root timer.
PausedError: If attempting stamp in paused timer.
StoppedError: If timer already stopped.
TypeError: If given backdate value is not type float.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Timer already stopped.")
if backdate is None:
t_stop = t
else:
if f.t is f.root:
raise BackdateError("Cannot backdate stop of root timer.")
if not isinstance(backdate, float):
raise TypeError("Backdate must be type float.")
if backdate > t:
raise BackdateError("Cannot backdate to future time.")
if backdate < f.t.last_t:
raise BackdateError("Cannot backdate to time earlier than last stamp.")
t_stop = backdate
unique = SET['UN'] if (unique is None and un is None) else bool(unique or un) # bool(None) becomes False
keep_subdivisions = SET['KS'] if (keep_subdivisions is None and ks is None) else bool(keep_subdivisions or ks)
quick_print = SET['QP'] if (quick_print is None and qp is None) else bool(quick_print or qp)
if name is not None:
if f.t.paused:
raise PausedError("Cannot stamp paused timer.")
elapsed = t_stop - f.t.last_t
_stamp(name, elapsed, unique, keep_subdivisions, quick_print)
else:
times_priv.assign_subdivisions(UNASGN, keep_subdivisions)
for s in f.t.rgstr_stamps:
if s not in f.s.cum:
f.s.cum[s] = 0.
f.s.order.append(s)
if not f.t.paused:
f.t.tmp_total += t_stop - f.t.start_t
f.t.tmp_total -= f.t.self_cut
f.t.self_cut += timer() - t # AFTER subtraction from tmp_total, before dump
times_priv.dump_times()
f.t.stopped = True
if quick_print:
print("({}) Total: {:.4f}".format(f.t.name, f.r.total))
return t | Mark the end of timing. Optionally performs a stamp, hence accepts the
same arguments.
Notes:
If keeping subdivisions and not calling a stamp, any awaiting subdivisions
will be assigned to a special 'UNASSIGNED' position to indicate that they
are not properly accounted for in the hierarchy (these can happen at
different places and may be combined inadvertently).
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp.
Args:
name (any, optional): If used, passed to a call to stamp()
backdate (float, optional): time to use for stop instead of current
unique (bool, optional): see stamp()
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): boolean, print total time
un (bool, optional): see stamp()
ks (bool, optional): see stamp()
qp (bool, optional): see stamp()
Returns:
float: The current time.
Raises:
BackdateError: If given backdate is out of range, or if used in root timer.
PausedError: If attempting stamp in paused timer.
StoppedError: If timer already stopped.
TypeError: If given backdate value is not type float. | Below is the the instruction that describes the task:
### Input:
Mark the end of timing. Optionally performs a stamp, hence accepts the
same arguments.
Notes:
If keeping subdivisions and not calling a stamp, any awaiting subdivisions
will be assigned to a special 'UNASSIGNED' position to indicate that they
are not properly accounted for in the hierarchy (these can happen at
different places and may be combined inadvertently).
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp.
Args:
name (any, optional): If used, passed to a call to stamp()
backdate (float, optional): time to use for stop instead of current
unique (bool, optional): see stamp()
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): boolean, print total time
un (bool, optional): see stamp()
ks (bool, optional): see stamp()
qp (bool, optional): see stamp()
Returns:
float: The current time.
Raises:
BackdateError: If given backdate is out of range, or if used in root timer.
PausedError: If attempting stamp in paused timer.
StoppedError: If timer already stopped.
TypeError: If given backdate value is not type float.
### Response:
def stop(name=None, backdate=None,
         unique=None, keep_subdivisions=None, quick_print=None,
         un=None, ks=None, qp=None):
    """
    Mark the end of timing. Optionally performs a stamp, hence accepts the
    same arguments.
    Notes:
    If keeping subdivisions and not calling a stamp, any awaiting subdivisions
    will be assigned to a special 'UNASSIGNED' position to indicate that they
    are not properly accounted for in the hierarchy (these can happen at
    different places and may be combined inadvertently).
    Backdating: For subdivisions only. Backdate time must be in the past
    but more recent than the latest stamp.
    Args:
        name (any, optional): If used, passed to a call to stamp()
        backdate (float, optional): time to use for stop instead of current
        unique (bool, optional): see stamp()
        keep_subdivisions (bool, optional): keep awaiting subdivisions
        quick_print (bool, optional): boolean, print total time
        un (bool, optional): see stamp()
        ks (bool, optional): see stamp()
        qp (bool, optional): see stamp()
    Returns:
        float: The current time.
    Raises:
        BackdateError: If given backdate is out of range, or if used in root timer.
        PausedError: If attempting stamp in paused timer.
        StoppedError: If timer already stopped.
        TypeError: If given backdate value is not type float.
    """
    t = timer()
    if f.t.stopped:
        raise StoppedError("Timer already stopped.")
    if backdate is None:
        t_stop = t
    else:
        # Backdating is only valid for subdivision timers, and the supplied
        # time must fall between the most recent stamp and now.
        if f.t is f.root:
            raise BackdateError("Cannot backdate stop of root timer.")
        if not isinstance(backdate, float):
            raise TypeError("Backdate must be type float.")
        if backdate > t:
            raise BackdateError("Cannot backdate to future time.")
        if backdate < f.t.last_t:
            raise BackdateError("Cannot backdate to time earlier than last stamp.")
        t_stop = backdate
    # Long names and their short aliases (un/ks/qp) are interchangeable; the
    # global SET defaults apply only when neither form was provided.
    unique = SET['UN'] if (unique is None and un is None) else bool(unique or un)  # bool(None) becomes False
    keep_subdivisions = SET['KS'] if (keep_subdivisions is None and ks is None) else bool(keep_subdivisions or ks)
    quick_print = SET['QP'] if (quick_print is None and qp is None) else bool(quick_print or qp)
    if name is not None:
        if f.t.paused:
            raise PausedError("Cannot stamp paused timer.")
        elapsed = t_stop - f.t.last_t
        _stamp(name, elapsed, unique, keep_subdivisions, quick_print)
    else:
        # No final stamp: park any awaiting subdivisions under UNASSIGNED.
        times_priv.assign_subdivisions(UNASGN, keep_subdivisions)
    # Ensure every registered stamp name appears in the cumulative record,
    # even if it never fired during this run.
    for s in f.t.rgstr_stamps:
        if s not in f.s.cum:
            f.s.cum[s] = 0.
            f.s.order.append(s)
    if not f.t.paused:
        f.t.tmp_total += t_stop - f.t.start_t
    # self_cut tracks this module's own bookkeeping overhead, which is
    # excluded from the reported total.
    f.t.tmp_total -= f.t.self_cut
    f.t.self_cut += timer() - t  # AFTER subtraction from tmp_total, before dump
    times_priv.dump_times()
    f.t.stopped = True
    if quick_print:
        print("({}) Total: {:.4f}".format(f.t.name, f.r.total))
    return t
def convert_elementwise_div(
params, w_name, scope_name, inputs, layers, weights, names
):
"""
    Convert elementwise division.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting elementwise_div ...')
if names == 'short':
tf_name = 'D' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
def target_layer(x):
layer = tf.div(
x[0],
x[1]
)
return layer
lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)
    layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]]) | Convert elementwise division.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | Below is the the instruction that describes the task:
### Input:
Convert elementwise division.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
### Response:
def convert_elementwise_div(
        params, w_name, scope_name, inputs, layers, weights, names
):
    """
    Convert elementwise division.
    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting elementwise_div ...')
    # Layer-naming scheme shared by the other converters: a random short id,
    # the raw weight name, or the weight name plus a random suffix.
    if names == 'short':
        tf_name = 'D' + random_string(7)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())
    def target_layer(x):
        # Elementwise x[0] / x[1].
        # NOTE(review): tf.div is deprecated in TF2 (tf.math.divide) — kept as-is.
        layer = tf.div(
            x[0],
            x[1]
        )
        return layer
    lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)
    layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])
def aggregate(self, index):
"""Performs a groupby of the unique Columns by index, as constructed from self.df.
Args:
index (str, or pd.Index): Index or column name of self.df.
Returns:
pd.DataFrame: A dataframe, aggregated by index, that contains the result
of the various ColumnFunctions, and named accordingly.
"""
# deal with index as a string vs index as a index/MultiIndex
if isinstance(index, string_types):
col_df_grouped = self.col_df.groupby(self.df[index])
else:
self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])
col_df_grouped = self.col_df.groupby(level=index)
self.col_df.index = self.df.index
# perform the actual aggregation
self.reduced_df = pd.DataFrame({
colred: col_df_grouped[colred.column].agg(colred.agg_func)
for colred in self.column_reductions
})
# then apply the functions to produce the final dataframe
reduced_dfs = []
for cf in self.column_functions:
# each apply_and_name() calls get_reduced() with the column reductions it wants
reduced_dfs.append(cf.apply_and_name(self))
return pd.concat(reduced_dfs, axis=1) | Performs a groupby of the unique Columns by index, as constructed from self.df.
Args:
index (str, or pd.Index): Index or column name of self.df.
Returns:
pd.DataFrame: A dataframe, aggregated by index, that contains the result
of the various ColumnFunctions, and named accordingly. | Below is the the instruction that describes the task:
### Input:
Performs a groupby of the unique Columns by index, as constructed from self.df.
Args:
index (str, or pd.Index): Index or column name of self.df.
Returns:
pd.DataFrame: A dataframe, aggregated by index, that contains the result
of the various ColumnFunctions, and named accordingly.
### Response:
def aggregate(self, index):
    """Performs a groupby of the unique Columns by index, as constructed from self.df.
    Args:
        index (str, or pd.Index): Index or column name of self.df.
    Returns:
        pd.DataFrame: A dataframe, aggregated by index, that contains the result
        of the various ColumnFunctions, and named accordingly.
    """
    # deal with index as a string vs index as a index/MultiIndex
    if isinstance(index, string_types):
        col_df_grouped = self.col_df.groupby(self.df[index])
    else:
        # Temporarily re-key col_df with the requested index columns so we
        # can group by level, then restore the original index afterwards.
        self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])
        col_df_grouped = self.col_df.groupby(level=index)
        self.col_df.index = self.df.index
    # perform the actual aggregation
    # One aggregated series per (column, agg_func) reduction, keyed by the
    # reduction object itself so apply_and_name() can look results up.
    self.reduced_df = pd.DataFrame({
        colred: col_df_grouped[colred.column].agg(colred.agg_func)
        for colred in self.column_reductions
    })
    # then apply the functions to produce the final dataframe
    reduced_dfs = []
    for cf in self.column_functions:
        # each apply_and_name() calls get_reduced() with the column reductions it wants
        reduced_dfs.append(cf.apply_and_name(self))
    return pd.concat(reduced_dfs, axis=1)
def get_smtlib_script_satisfiability(self, extra_constraints=(), extra_variables=()):
"""
Return an smt-lib script that check the satisfiability of the current constraints
:return string: smt-lib script
"""
try:
e_csts = self._solver_backend.convert_list(extra_constraints + tuple(self.constraints))
e_variables = self._solver_backend.convert_list(extra_variables)
variables, csts = self._solver_backend._get_all_vars_and_constraints(e_c=e_csts, e_v=e_variables)
return self._solver_backend._get_satisfiability_smt_script(csts, variables)
except BackendError as e:
raise ClaripyFrontendError("Backend error during smtlib script generation") from e | Return an smt-lib script that check the satisfiability of the current constraints
:return string: smt-lib script | Below is the the instruction that describes the task:
### Input:
Return an smt-lib script that check the satisfiability of the current constraints
:return string: smt-lib script
### Response:
def get_smtlib_script_satisfiability(self, extra_constraints=(), extra_variables=()):
    """
    Return an smt-lib script that check the satisfiability of the current constraints

    :return string: smt-lib script
    """
    try:
        converted_csts = self._solver_backend.convert_list(extra_constraints + tuple(self.constraints))
        converted_vars = self._solver_backend.convert_list(extra_variables)
        all_vars, all_csts = self._solver_backend._get_all_vars_and_constraints(
            e_c=converted_csts, e_v=converted_vars)
        return self._solver_backend._get_satisfiability_smt_script(all_csts, all_vars)
    except BackendError as e:
        raise ClaripyFrontendError("Backend error during smtlib script generation") from e
def get_last_result(self):
"""Read the last conversion result when in continuous conversion mode.
Will return a signed integer value.
"""
# Retrieve the conversion register value, convert to a signed int, and
# return it.
result = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
return self._conversion_value(result[1], result[0]) | Read the last conversion result when in continuous conversion mode.
Will return a signed integer value. | Below is the the instruction that describes the task:
### Input:
Read the last conversion result when in continuous conversion mode.
Will return a signed integer value.
### Response:
def get_last_result(self):
    """Read the last conversion result when in continuous conversion mode.
    Will return a signed integer value.
    """
    # Grab the two raw bytes of the conversion register, then let
    # _conversion_value turn them into a signed integer.
    first_byte, second_byte = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
    return self._conversion_value(second_byte, first_byte)
def createRooms( rm ):
"""
create rooms, using multiline string showing map layout
string contains symbols for the following:
A-Z, a-z indicate rooms, and rooms will be stored in a dictionary by
reference letter
-, | symbols indicate connection between rooms
<, >, ^, . symbols indicate one-way connection between rooms
"""
# start with empty dictionary of rooms
ret = {}
# look for room symbols, and initialize dictionary
# - exit room is always marked 'Z'
for c in rm:
if c in string.ascii_letters:
if c != "Z":
ret[c] = Room(c)
else:
ret[c] = Exit()
# scan through input string looking for connections between rooms
rows = rm.split("\n")
for row,line in enumerate(rows):
for col,c in enumerate(line):
if c in string.ascii_letters:
room = ret[c]
n = None
s = None
e = None
w = None
# look in neighboring cells for connection symbols (must take
# care to guard that neighboring cells exist before testing
# contents)
if col > 0 and line[col-1] in "<-":
other = line[col-2]
w = ret[other]
if col < len(line)-1 and line[col+1] in "->":
other = line[col+2]
e = ret[other]
if row > 1 and col < len(rows[row-1]) and rows[row-1][col] in '|^':
other = rows[row-2][col]
n = ret[other]
if row < len(rows)-1 and col < len(rows[row+1]) and rows[row+1][col] in '|.':
other = rows[row+2][col]
s = ret[other]
# set connections to neighboring rooms
room.doors=[n,s,e,w]
return ret | create rooms, using multiline string showing map layout
string contains symbols for the following:
A-Z, a-z indicate rooms, and rooms will be stored in a dictionary by
reference letter
-, | symbols indicate connection between rooms
<, >, ^, . symbols indicate one-way connection between rooms | Below is the the instruction that describes the task:
### Input:
create rooms, using multiline string showing map layout
string contains symbols for the following:
A-Z, a-z indicate rooms, and rooms will be stored in a dictionary by
reference letter
-, | symbols indicate connection between rooms
<, >, ^, . symbols indicate one-way connection between rooms
### Response:
def createRooms( rm ):
    """
    create rooms, using multiline string showing map layout
    string contains symbols for the following:
    A-Z, a-z indicate rooms, and rooms will be stored in a dictionary by
    reference letter
    -, | symbols indicate connection between rooms
    <, >, ^, . symbols indicate one-way connection between rooms
    """
    # start with empty dictionary of rooms
    ret = {}
    # look for room symbols, and initialize dictionary
    # - exit room is always marked 'Z'
    for c in rm:
        if c in string.ascii_letters:
            if c != "Z":
                ret[c] = Room(c)
            else:
                ret[c] = Exit()
    # scan through input string looking for connections between rooms
    rows = rm.split("\n")
    for row,line in enumerate(rows):
        for col,c in enumerate(line):
            if c in string.ascii_letters:
                room = ret[c]
                n = None
                s = None
                e = None
                w = None
                # look in neighboring cells for connection symbols (must take
                # care to guard that neighboring cells exist before testing
                # contents)
                # NOTE(review): a connector is expected one cell away and the
                # neighboring room two cells away; a connector sitting at a
                # line/grid edge would raise IndexError on the two-cell lookup
                # — confirm that maps are always padded.
                if col > 0 and line[col-1] in "<-":
                    other = line[col-2]
                    w = ret[other]
                if col < len(line)-1 and line[col+1] in "->":
                    other = line[col+2]
                    e = ret[other]
                if row > 1 and col < len(rows[row-1]) and rows[row-1][col] in '|^':
                    other = rows[row-2][col]
                    n = ret[other]
                if row < len(rows)-1 and col < len(rows[row+1]) and rows[row+1][col] in '|.':
                    other = rows[row+2][col]
                    s = ret[other]
                # set connections to neighboring rooms
                room.doors=[n,s,e,w]
    return ret
def write_chunk(outfile, tag, data=b''):
"""
Write a PNG chunk to the output file, including length and
checksum.
"""
data = bytes(data)
# http://www.w3.org/TR/PNG/#5Chunk-layout
outfile.write(struct.pack("!I", len(data)))
outfile.write(tag)
outfile.write(data)
checksum = zlib.crc32(tag)
checksum = zlib.crc32(data, checksum)
checksum &= 2 ** 32 - 1
outfile.write(struct.pack("!I", checksum)) | Write a PNG chunk to the output file, including length and
checksum. | Below is the the instruction that describes the task:
### Input:
Write a PNG chunk to the output file, including length and
checksum.
### Response:
def write_chunk(outfile, tag, data=b''):
"""
Write a PNG chunk to the output file, including length and
checksum.
"""
data = bytes(data)
# http://www.w3.org/TR/PNG/#5Chunk-layout
outfile.write(struct.pack("!I", len(data)))
outfile.write(tag)
outfile.write(data)
checksum = zlib.crc32(tag)
checksum = zlib.crc32(data, checksum)
checksum &= 2 ** 32 - 1
outfile.write(struct.pack("!I", checksum)) |
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
path, regardless of whether they are exist the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
pname = os.path.join(*name.split('.')) # Reconstitute as relative path
# Just in case os.extsep != '.'
sname = os.extsep.join(name.split('.'))
sname_pkg = sname + os.extsep + "pkg"
init_py = "__init__" + os.extsep + "py"
path = path[:] # Start with a copy of the existing path
for dir in sys.path:
if not isinstance(dir, basestring) or not os.path.isdir(dir):
continue
subdir = os.path.join(dir, pname)
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
initfile = os.path.join(subdir, init_py)
if subdir not in path and os.path.isfile(initfile):
path.append(subdir)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except IOError, msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
f.close()
return path | Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
path, regardless of whether they are exist the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior). | Below is the the instruction that describes the task:
### Input:
Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
path, regardless of whether they are exist the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
### Response:
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
path, regardless of whether they are exist the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
pname = os.path.join(*name.split('.')) # Reconstitute as relative path
# Just in case os.extsep != '.'
sname = os.extsep.join(name.split('.'))
sname_pkg = sname + os.extsep + "pkg"
init_py = "__init__" + os.extsep + "py"
path = path[:] # Start with a copy of the existing path
for dir in sys.path:
if not isinstance(dir, basestring) or not os.path.isdir(dir):
continue
subdir = os.path.join(dir, pname)
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
initfile = os.path.join(subdir, init_py)
if subdir not in path and os.path.isfile(initfile):
path.append(subdir)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except IOError, msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
f.close()
return path |
def syncItems(self):
""" Returns an instance of :class:`plexapi.sync.SyncList` for current device.
Raises:
:class:`plexapi.exceptions.BadRequest`: when the device doesn`t provides `sync-target`.
"""
if 'sync-target' not in self.provides:
raise BadRequest('Requested syncList for device which do not provides sync-target')
return self._server.syncItems(client=self) | Returns an instance of :class:`plexapi.sync.SyncList` for current device.
Raises:
:class:`plexapi.exceptions.BadRequest`: when the device doesn`t provides `sync-target`. | Below is the the instruction that describes the task:
### Input:
Returns an instance of :class:`plexapi.sync.SyncList` for current device.
Raises:
:class:`plexapi.exceptions.BadRequest`: when the device doesn`t provides `sync-target`.
### Response:
def syncItems(self):
""" Returns an instance of :class:`plexapi.sync.SyncList` for current device.
Raises:
:class:`plexapi.exceptions.BadRequest`: when the device doesn`t provides `sync-target`.
"""
if 'sync-target' not in self.provides:
raise BadRequest('Requested syncList for device which do not provides sync-target')
return self._server.syncItems(client=self) |
def initialize_schema(connection):
"""Initializes the TensorBoard sqlite schema using the given connection.
Args:
connection: A sqlite DB connection.
"""
cursor = connection.cursor()
cursor.execute("PRAGMA application_id={}".format(_TENSORBOARD_APPLICATION_ID))
cursor.execute("PRAGMA user_version={}".format(_TENSORBOARD_USER_VERSION))
with connection:
for statement in _SCHEMA_STATEMENTS:
lines = statement.strip('\n').split('\n')
message = lines[0] + ('...' if len(lines) > 1 else '')
logger.debug('Running DB init statement: %s', message)
cursor.execute(statement) | Initializes the TensorBoard sqlite schema using the given connection.
Args:
connection: A sqlite DB connection. | Below is the the instruction that describes the task:
### Input:
Initializes the TensorBoard sqlite schema using the given connection.
Args:
connection: A sqlite DB connection.
### Response:
def initialize_schema(connection):
"""Initializes the TensorBoard sqlite schema using the given connection.
Args:
connection: A sqlite DB connection.
"""
cursor = connection.cursor()
cursor.execute("PRAGMA application_id={}".format(_TENSORBOARD_APPLICATION_ID))
cursor.execute("PRAGMA user_version={}".format(_TENSORBOARD_USER_VERSION))
with connection:
for statement in _SCHEMA_STATEMENTS:
lines = statement.strip('\n').split('\n')
message = lines[0] + ('...' if len(lines) > 1 else '')
logger.debug('Running DB init statement: %s', message)
cursor.execute(statement) |
def update_book(self, id, body, doc_type='book'):
''' Update a book
The "body" is merged with the current one.
Yes, it is NOT overwritten.
In case of concurrency conflict
this function could raise `elasticsearch.ConflictError`
'''
# note that we are NOT overwriting all the _source, just merging
book = self.get_book_by_id(id)
book['_source'].update(body)
validated = validate_book(book['_source'])
ret = self.es.index(index=self.index_name, id=id,
doc_type=doc_type, body=validated, version=book['_version'])
return ret | Update a book
The "body" is merged with the current one.
Yes, it is NOT overwritten.
In case of concurrency conflict
this function could raise `elasticsearch.ConflictError` | Below is the the instruction that describes the task:
### Input:
Update a book
The "body" is merged with the current one.
Yes, it is NOT overwritten.
In case of concurrency conflict
this function could raise `elasticsearch.ConflictError`
### Response:
def update_book(self, id, body, doc_type='book'):
''' Update a book
The "body" is merged with the current one.
Yes, it is NOT overwritten.
In case of concurrency conflict
this function could raise `elasticsearch.ConflictError`
'''
# note that we are NOT overwriting all the _source, just merging
book = self.get_book_by_id(id)
book['_source'].update(body)
validated = validate_book(book['_source'])
ret = self.es.index(index=self.index_name, id=id,
doc_type=doc_type, body=validated, version=book['_version'])
return ret |
def deblock(f):
"""Decompress a single block from a compressed Plan 9 image file.
Each block starts with 2 decimal strings of 12 bytes each. Yields a
sequence of (row, data) pairs where row is the total number of rows
processed according to the file format and data is the decompressed
data for a set of rows."""
row = int(f.read(12))
size = int(f.read(12))
if not (0 <= size <= 6000):
raise Error('block has invalid size; not a Plan 9 image file?')
# Since each block is at most 6000 bytes we may as well read it all in
# one go.
d = f.read(size)
i = 0
o = []
while i < size:
x = ord(d[i])
i += 1
if x & 0x80:
x = (x & 0x7f) + 1
lit = d[i: i + x]
i += x
o.extend(lit)
continue
# x's high-order bit is 0
length = (x >> 2) + 3
# Offset is made from bottom 2 bits of x and all 8 bits of next
# byte. http://plan9.bell-labs.com/magic/man2html/6/image doesn't
# say whether x's 2 bits are most significant or least significant.
# But it is clear from inspecting a random file,
# http://plan9.bell-labs.com/sources/plan9/sys/games/lib/sokoban/images/cargo.bit
# that x's 2 bits are most significant.
offset = (x & 3) << 8
offset |= ord(d[i])
i += 1
# Note: complement operator neatly maps (0 to 1023) to (-1 to
# -1024). Adding len(o) gives a (non-negative) offset into o from
# which to start indexing.
offset = ~offset + len(o)
if offset < 0:
raise Error('byte offset indexes off the begininning of '
'the output buffer; not a Plan 9 image file?')
for j in range(length):
o.append(o[offset + j])
return row, ''.join(o) | Decompress a single block from a compressed Plan 9 image file.
Each block starts with 2 decimal strings of 12 bytes each. Yields a
sequence of (row, data) pairs where row is the total number of rows
processed according to the file format and data is the decompressed
data for a set of rows. | Below is the the instruction that describes the task:
### Input:
Decompress a single block from a compressed Plan 9 image file.
Each block starts with 2 decimal strings of 12 bytes each. Yields a
sequence of (row, data) pairs where row is the total number of rows
processed according to the file format and data is the decompressed
data for a set of rows.
### Response:
def deblock(f):
"""Decompress a single block from a compressed Plan 9 image file.
Each block starts with 2 decimal strings of 12 bytes each. Yields a
sequence of (row, data) pairs where row is the total number of rows
processed according to the file format and data is the decompressed
data for a set of rows."""
row = int(f.read(12))
size = int(f.read(12))
if not (0 <= size <= 6000):
raise Error('block has invalid size; not a Plan 9 image file?')
# Since each block is at most 6000 bytes we may as well read it all in
# one go.
d = f.read(size)
i = 0
o = []
while i < size:
x = ord(d[i])
i += 1
if x & 0x80:
x = (x & 0x7f) + 1
lit = d[i: i + x]
i += x
o.extend(lit)
continue
# x's high-order bit is 0
length = (x >> 2) + 3
# Offset is made from bottom 2 bits of x and all 8 bits of next
# byte. http://plan9.bell-labs.com/magic/man2html/6/image doesn't
# say whether x's 2 bits are most significant or least significant.
# But it is clear from inspecting a random file,
# http://plan9.bell-labs.com/sources/plan9/sys/games/lib/sokoban/images/cargo.bit
# that x's 2 bits are most significant.
offset = (x & 3) << 8
offset |= ord(d[i])
i += 1
# Note: complement operator neatly maps (0 to 1023) to (-1 to
# -1024). Adding len(o) gives a (non-negative) offset into o from
# which to start indexing.
offset = ~offset + len(o)
if offset < 0:
raise Error('byte offset indexes off the begininning of '
'the output buffer; not a Plan 9 image file?')
for j in range(length):
o.append(o[offset + j])
return row, ''.join(o) |
def login(self, user=None, password=None, **kwargs):
"""
登陆接口
:param user: 用户名
:param password: 密码
:param kwargs: 其他参数
:return:
"""
headers = self._generate_headers()
self.s.headers.update(headers)
# init cookie
self.s.get(self.LOGIN_PAGE)
# post for login
params = self.create_login_params(user, password, **kwargs)
rep = self.s.post(self.LOGIN_API, data=params)
self.check_login_success(rep)
log.info("登录成功") | 登陆接口
:param user: 用户名
:param password: 密码
:param kwargs: 其他参数
:return: | Below is the the instruction that describes the task:
### Input:
登陆接口
:param user: 用户名
:param password: 密码
:param kwargs: 其他参数
:return:
### Response:
def login(self, user=None, password=None, **kwargs):
"""
登陆接口
:param user: 用户名
:param password: 密码
:param kwargs: 其他参数
:return:
"""
headers = self._generate_headers()
self.s.headers.update(headers)
# init cookie
self.s.get(self.LOGIN_PAGE)
# post for login
params = self.create_login_params(user, password, **kwargs)
rep = self.s.post(self.LOGIN_API, data=params)
self.check_login_success(rep)
log.info("登录成功") |
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s | Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str` | Below is the the instruction that describes the task:
### Input:
Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
### Response:
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s |
async def _match_idens(self, core, prefix):
'''
Returns the iden that starts with prefix. Prints out error and returns None if it doesn't match
exactly one.
'''
idens = [iden for iden, trig in await core.listTriggers()]
matches = [iden for iden in idens if iden.startswith(prefix)]
if len(matches) == 1:
return matches[0]
elif len(matches) == 0:
self.printf('Error: provided iden does not match any valid authorized triggers')
else:
self.printf('Error: provided iden matches more than one trigger')
return None | Returns the iden that starts with prefix. Prints out error and returns None if it doesn't match
exactly one. | Below is the the instruction that describes the task:
### Input:
Returns the iden that starts with prefix. Prints out error and returns None if it doesn't match
exactly one.
### Response:
async def _match_idens(self, core, prefix):
'''
Returns the iden that starts with prefix. Prints out error and returns None if it doesn't match
exactly one.
'''
idens = [iden for iden, trig in await core.listTriggers()]
matches = [iden for iden in idens if iden.startswith(prefix)]
if len(matches) == 1:
return matches[0]
elif len(matches) == 0:
self.printf('Error: provided iden does not match any valid authorized triggers')
else:
self.printf('Error: provided iden matches more than one trigger')
return None |
def tx_max(tasmax, freq='YS'):
r"""Highest max temperature
The maximum value of daily maximum temperature.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Maximum value of daily maximum temperature.
Notes
-----
Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then the maximum
daily maximum temperature for period :math:`j` is:
.. math::
TXx_j = max(TX_{ij})
"""
return tasmax.resample(time=freq).max(dim='time', keep_attrs=True) | r"""Highest max temperature
The maximum value of daily maximum temperature.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Maximum value of daily maximum temperature.
Notes
-----
Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then the maximum
daily maximum temperature for period :math:`j` is:
.. math::
TXx_j = max(TX_{ij}) | Below is the the instruction that describes the task:
### Input:
r"""Highest max temperature
The maximum value of daily maximum temperature.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Maximum value of daily maximum temperature.
Notes
-----
Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then the maximum
daily maximum temperature for period :math:`j` is:
.. math::
TXx_j = max(TX_{ij})
### Response:
def tx_max(tasmax, freq='YS'):
r"""Highest max temperature
The maximum value of daily maximum temperature.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Maximum value of daily maximum temperature.
Notes
-----
Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then the maximum
daily maximum temperature for period :math:`j` is:
.. math::
TXx_j = max(TX_{ij})
"""
return tasmax.resample(time=freq).max(dim='time', keep_attrs=True) |
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats["by_module"][modname] = {}
self.stats["by_module"][modname]["statement"] = 0
for msg_cat in MSG_TYPES.values():
self.stats["by_module"][modname][msg_cat] = 0 | set the name of the currently analyzed module and
init statistics for it | Below is the the instruction that describes the task:
### Input:
set the name of the currently analyzed module and
init statistics for it
### Response:
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats["by_module"][modname] = {}
self.stats["by_module"][modname]["statement"] = 0
for msg_cat in MSG_TYPES.values():
self.stats["by_module"][modname][msg_cat] = 0 |
def clone(self, parent=None):
"""
Clone this object.
@param parent: The parent for the clone.
@type parent: L{element.Element}
@return: A copy of this object assigned to the new parent.
@rtype: L{Attribute}
"""
a = Attribute(self.qname(), self.value)
a.parent = parent
return a | Clone this object.
@param parent: The parent for the clone.
@type parent: L{element.Element}
@return: A copy of this object assigned to the new parent.
@rtype: L{Attribute} | Below is the the instruction that describes the task:
### Input:
Clone this object.
@param parent: The parent for the clone.
@type parent: L{element.Element}
@return: A copy of this object assigned to the new parent.
@rtype: L{Attribute}
### Response:
def clone(self, parent=None):
"""
Clone this object.
@param parent: The parent for the clone.
@type parent: L{element.Element}
@return: A copy of this object assigned to the new parent.
@rtype: L{Attribute}
"""
a = Attribute(self.qname(), self.value)
a.parent = parent
return a |
def accuracy(self):
"""Calculates accuracy
:return: Accuracy
"""
true_pos = self.matrix[0][0]
false_pos = self.matrix[1][0]
false_neg = self.matrix[0][1]
true_neg = self.matrix[1][1]
num = 1.0 * (true_pos + true_neg)
den = true_pos + true_neg + false_pos + false_neg
return divide(num, den) | Calculates accuracy
:return: Accuracy | Below is the the instruction that describes the task:
### Input:
Calculates accuracy
:return: Accuracy
### Response:
def accuracy(self):
"""Calculates accuracy
:return: Accuracy
"""
true_pos = self.matrix[0][0]
false_pos = self.matrix[1][0]
false_neg = self.matrix[0][1]
true_neg = self.matrix[1][1]
num = 1.0 * (true_pos + true_neg)
den = true_pos + true_neg + false_pos + false_neg
return divide(num, den) |
def generate_daterange(report):
"""
Creates a date_range timestamp with format YYYY-MM-DD-T-HH:MM:SS
based on begin and end dates for easier parsing in Kibana.
Move to utils to avoid duplication w/ elastic?
"""
metadata = report["report_metadata"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"])
end_date = human_timestamp_to_datetime(metadata["end_date"])
begin_date_human = begin_date.strftime("%Y-%m-%dT%H:%M:%S")
end_date_human = end_date.strftime("%Y-%m-%dT%H:%M:%S")
date_range = [begin_date_human,
end_date_human]
logger.debug("date_range is {}".format(date_range))
return date_range | Creates a date_range timestamp with format YYYY-MM-DD-T-HH:MM:SS
based on begin and end dates for easier parsing in Kibana.
Move to utils to avoid duplication w/ elastic? | Below is the the instruction that describes the task:
### Input:
Creates a date_range timestamp with format YYYY-MM-DD-T-HH:MM:SS
based on begin and end dates for easier parsing in Kibana.
Move to utils to avoid duplication w/ elastic?
### Response:
def generate_daterange(report):
"""
Creates a date_range timestamp with format YYYY-MM-DD-T-HH:MM:SS
based on begin and end dates for easier parsing in Kibana.
Move to utils to avoid duplication w/ elastic?
"""
metadata = report["report_metadata"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"])
end_date = human_timestamp_to_datetime(metadata["end_date"])
begin_date_human = begin_date.strftime("%Y-%m-%dT%H:%M:%S")
end_date_human = end_date.strftime("%Y-%m-%dT%H:%M:%S")
date_range = [begin_date_human,
end_date_human]
logger.debug("date_range is {}".format(date_range))
return date_range |
def from_timestamp(
timestamp, tz=UTC # type: Union[int, float] # type: Union[str, _Timezone]
): # type: (...) -> DateTime
"""
Create a DateTime instance from a timestamp.
"""
dt = _datetime.datetime.utcfromtimestamp(timestamp)
dt = datetime(
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond
)
if tz is not UTC or tz != "UTC":
dt = dt.in_timezone(tz)
return dt | Create a DateTime instance from a timestamp. | Below is the the instruction that describes the task:
### Input:
Create a DateTime instance from a timestamp.
### Response:
def from_timestamp(
timestamp, tz=UTC # type: Union[int, float] # type: Union[str, _Timezone]
): # type: (...) -> DateTime
"""
Create a DateTime instance from a timestamp.
"""
dt = _datetime.datetime.utcfromtimestamp(timestamp)
dt = datetime(
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond
)
if tz is not UTC or tz != "UTC":
dt = dt.in_timezone(tz)
return dt |
def seek(self, position, whence=os.SEEK_SET):
"""
Seek to ``position`` in file.
:type position: int
:param position: offset in bytes to seek to
:type whence: int
:param whence: defaults to ``os.SEEK_SET`` (absolute); other
values are ``os.SEEK_CUR`` (relative to the current position)
and ``os.SEEK_END`` (relative to the file's end).
"""
_complain_ifclosed(self.closed)
return self.f.seek(position, whence) | Seek to ``position`` in file.
:type position: int
:param position: offset in bytes to seek to
:type whence: int
:param whence: defaults to ``os.SEEK_SET`` (absolute); other
values are ``os.SEEK_CUR`` (relative to the current position)
and ``os.SEEK_END`` (relative to the file's end). | Below is the the instruction that describes the task:
### Input:
Seek to ``position`` in file.
:type position: int
:param position: offset in bytes to seek to
:type whence: int
:param whence: defaults to ``os.SEEK_SET`` (absolute); other
values are ``os.SEEK_CUR`` (relative to the current position)
and ``os.SEEK_END`` (relative to the file's end).
### Response:
def seek(self, position, whence=os.SEEK_SET):
"""
Seek to ``position`` in file.
:type position: int
:param position: offset in bytes to seek to
:type whence: int
:param whence: defaults to ``os.SEEK_SET`` (absolute); other
values are ``os.SEEK_CUR`` (relative to the current position)
and ``os.SEEK_END`` (relative to the file's end).
"""
_complain_ifclosed(self.closed)
return self.f.seek(position, whence) |
def getModulePath(project_path,module_name,verbose):
'''Searches for module_name in searchpath and returns the filepath.
If no filepath was found, returns None.'''
if not module_name:
return None
sys.path.append(project_path)
try:
package = pkgutil.get_loader(module_name)
except ImportError:
if verbose:
print("Parent module for "+module_name+" not found.")
return None
except:
if verbose:
print(module_name+" not loaded for bizarre reasons")
try:
if package:
if package.get_code(module_name):
filename = package.get_code(module_name).co_filename
return filename
elif package.find_spec(module_name).has_location==False:
return None #built-in module such as itertools
else:
pass #perhaps filename is in package.find_spec(module_name).origin?
pass #a good reference is https://www.python.org/dev/peps/pep-0302/
except ImportError:
if verbose:
print("Code object unavailable for "+module_name)
return None
except AttributeError:
if verbose:
print(module_name+" is an ExtensionFileLoader object")
return None
except:
if verbose:
print(module_name+" not loaded for bizarre reasons")
return None
else:
if verbose:
print ("Module "+module_name+" not found.")
return None | Searches for module_name in searchpath and returns the filepath.
If no filepath was found, returns None. | Below is the the instruction that describes the task:
### Input:
Searches for module_name in searchpath and returns the filepath.
If no filepath was found, returns None.
### Response:
def getModulePath(project_path,module_name,verbose):
'''Searches for module_name in searchpath and returns the filepath.
If no filepath was found, returns None.'''
if not module_name:
return None
sys.path.append(project_path)
try:
package = pkgutil.get_loader(module_name)
except ImportError:
if verbose:
print("Parent module for "+module_name+" not found.")
return None
except:
if verbose:
print(module_name+" not loaded for bizarre reasons")
try:
if package:
if package.get_code(module_name):
filename = package.get_code(module_name).co_filename
return filename
elif package.find_spec(module_name).has_location==False:
return None #built-in module such as itertools
else:
pass #perhaps filename is in package.find_spec(module_name).origin?
pass #a good reference is https://www.python.org/dev/peps/pep-0302/
except ImportError:
if verbose:
print("Code object unavailable for "+module_name)
return None
except AttributeError:
if verbose:
print(module_name+" is an ExtensionFileLoader object")
return None
except:
if verbose:
print(module_name+" not loaded for bizarre reasons")
return None
else:
if verbose:
print ("Module "+module_name+" not found.")
return None |
def _split_index(self, key):
"""
Partitions key into key and deep dimension groups. If only key
indices are supplied, the data is indexed with an empty tuple.
Keys with indices than there are dimensions will be padded.
"""
if not isinstance(key, tuple):
key = (key,)
elif key == ():
return (), ()
if key[0] is Ellipsis:
num_pad = self.ndims - len(key) + 1
key = (slice(None),) * num_pad + key[1:]
elif len(key) < self.ndims:
num_pad = self.ndims - len(key)
key = key + (slice(None),) * num_pad
map_slice = key[:self.ndims]
if self._check_key_type:
map_slice = self._apply_key_type(map_slice)
if len(key) == self.ndims:
return map_slice, ()
else:
return map_slice, key[self.ndims:] | Partitions key into key and deep dimension groups. If only key
indices are supplied, the data is indexed with an empty tuple.
Keys with indices than there are dimensions will be padded. | Below is the the instruction that describes the task:
### Input:
Partitions key into key and deep dimension groups. If only key
indices are supplied, the data is indexed with an empty tuple.
Keys with indices than there are dimensions will be padded.
### Response:
def _split_index(self, key):
"""
Partitions key into key and deep dimension groups. If only key
indices are supplied, the data is indexed with an empty tuple.
Keys with indices than there are dimensions will be padded.
"""
if not isinstance(key, tuple):
key = (key,)
elif key == ():
return (), ()
if key[0] is Ellipsis:
num_pad = self.ndims - len(key) + 1
key = (slice(None),) * num_pad + key[1:]
elif len(key) < self.ndims:
num_pad = self.ndims - len(key)
key = key + (slice(None),) * num_pad
map_slice = key[:self.ndims]
if self._check_key_type:
map_slice = self._apply_key_type(map_slice)
if len(key) == self.ndims:
return map_slice, ()
else:
return map_slice, key[self.ndims:] |
def get_instance_assignment(self, ctx):
"""
Gets the integer expression in any of the four instance assignment
operators ('=' '@' '+=' '%=')
"""
if ctx is None:
return None
visitor = ExprVisitor(self.compiler)
expr = visitor.visit(ctx.expr())
expr = expressions.AssignmentCast(self.compiler.env, SourceRef.from_antlr(ctx.op), expr, int)
expr.predict_type()
return expr | Gets the integer expression in any of the four instance assignment
operators ('=' '@' '+=' '%=') | Below is the the instruction that describes the task:
### Input:
Gets the integer expression in any of the four instance assignment
operators ('=' '@' '+=' '%=')
### Response:
def get_instance_assignment(self, ctx):
"""
Gets the integer expression in any of the four instance assignment
operators ('=' '@' '+=' '%=')
"""
if ctx is None:
return None
visitor = ExprVisitor(self.compiler)
expr = visitor.visit(ctx.expr())
expr = expressions.AssignmentCast(self.compiler.env, SourceRef.from_antlr(ctx.op), expr, int)
expr.predict_type()
return expr |
def color(text, color=None, background=None, light=False, enabled="auto"):
"""
Return text in desired color if coloring enabled
Available colors: black red green yellow blue magenta cyan white.
Alternatively color can be prefixed with "light", e.g. lightgreen.
"""
colors = {"black": 30, "red": 31, "green": 32, "yellow": 33,
"blue": 34, "magenta": 35, "cyan": 36, "white": 37}
# Nothing do do if coloring disabled
if enabled == "auto":
enabled = Coloring().enabled()
if not enabled:
return text
# Prepare colors (strip 'light' if present in color)
if color and color.startswith("light"):
light = True
color = color[5:]
color = color and ";{0}".format(colors[color]) or ""
background = background and ";{0}".format(colors[background] + 10) or ""
light = light and 1 or 0
# Starting and finishing sequence
start = "\033[{0}{1}{2}m".format(light, color, background)
finish = "\033[1;m"
return "".join([start, text, finish]) | Return text in desired color if coloring enabled
Available colors: black red green yellow blue magenta cyan white.
Alternatively color can be prefixed with "light", e.g. lightgreen. | Below is the the instruction that describes the task:
### Input:
Return text in desired color if coloring enabled
Available colors: black red green yellow blue magenta cyan white.
Alternatively color can be prefixed with "light", e.g. lightgreen.
### Response:
def color(text, color=None, background=None, light=False, enabled="auto"):
"""
Return text in desired color if coloring enabled
Available colors: black red green yellow blue magenta cyan white.
Alternatively color can be prefixed with "light", e.g. lightgreen.
"""
colors = {"black": 30, "red": 31, "green": 32, "yellow": 33,
"blue": 34, "magenta": 35, "cyan": 36, "white": 37}
# Nothing do do if coloring disabled
if enabled == "auto":
enabled = Coloring().enabled()
if not enabled:
return text
# Prepare colors (strip 'light' if present in color)
if color and color.startswith("light"):
light = True
color = color[5:]
color = color and ";{0}".format(colors[color]) or ""
background = background and ";{0}".format(colors[background] + 10) or ""
light = light and 1 or 0
# Starting and finishing sequence
start = "\033[{0}{1}{2}m".format(light, color, background)
finish = "\033[1;m"
return "".join([start, text, finish]) |
def verify_authentication_data(self, key):
'''
Verify the current authentication data based on the current key-id and
the given key.
'''
correct_authentication_data = self.calculate_authentication_data(key)
return self.authentication_data == correct_authentication_data | Verify the current authentication data based on the current key-id and
the given key. | Below is the the instruction that describes the task:
### Input:
Verify the current authentication data based on the current key-id and
the given key.
### Response:
def verify_authentication_data(self, key):
'''
Verify the current authentication data based on the current key-id and
the given key.
'''
correct_authentication_data = self.calculate_authentication_data(key)
return self.authentication_data == correct_authentication_data |
async def download_profile_photo(
self, entity, file=None, *, download_big=True):
"""
Downloads the profile photo of the given entity (user/chat/channel).
Args:
entity (`entity`):
From who the photo will be downloaded.
.. note::
This method expects the full entity (which has the data
to download the photo), not an input variant.
It's possible that sometimes you can't fetch the entity
from its input (since you can get errors like
``ChannelPrivateError``) but you already have it through
another call, like getting a forwarded message from it.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If file is the type `bytes`, it will be downloaded in-memory
as a bytestring (e.g. ``file=bytes``).
download_big (`bool`, optional):
Whether to use the big version of the available photos.
Returns:
``None`` if no photo was provided, or if it was Empty. On success
the file path is returned since it may differ from the one given.
"""
# hex(crc32(x.encode('ascii'))) for x in
# ('User', 'Chat', 'UserFull', 'ChatFull')
ENTITIES = (0x2da17977, 0xc5af5d94, 0x1f4661b9, 0xd49a2697)
# ('InputPeer', 'InputUser', 'InputChannel')
INPUTS = (0xc91c90b6, 0xe669bf46, 0x40f202fd)
if not isinstance(entity, TLObject) or entity.SUBCLASS_OF_ID in INPUTS:
entity = await self.get_entity(entity)
possible_names = []
if entity.SUBCLASS_OF_ID not in ENTITIES:
photo = entity
else:
if not hasattr(entity, 'photo'):
# Special case: may be a ChatFull with photo:Photo
# This is different from a normal UserProfilePhoto and Chat
if not hasattr(entity, 'chat_photo'):
return None
return await self._download_photo(
entity.chat_photo, file, date=None, progress_callback=None)
for attr in ('username', 'first_name', 'title'):
possible_names.append(getattr(entity, attr, None))
photo = entity.photo
if isinstance(photo, (types.UserProfilePhoto, types.ChatPhoto)):
dc_id = photo.dc_id
which = photo.photo_big if download_big else photo.photo_small
loc = types.InputPeerPhotoFileLocation(
peer=await self.get_input_entity(entity),
local_id=which.local_id,
volume_id=which.volume_id,
big=download_big
)
else:
# It doesn't make any sense to check if `photo` can be used
# as input location, because then this method would be able
# to "download the profile photo of a message", i.e. its
# media which should be done with `download_media` instead.
return None
file = self._get_proper_filename(
file, 'profile_photo', '.jpg',
possible_names=possible_names
)
try:
result = await self.download_file(loc, file, dc_id=dc_id)
return result if file is bytes else file
except errors.LocationInvalidError:
# See issue #500, Android app fails as of v4.6.0 (1155).
# The fix seems to be using the full channel chat photo.
ie = await self.get_input_entity(entity)
if isinstance(ie, types.InputPeerChannel):
full = await self(functions.channels.GetFullChannelRequest(ie))
return await self._download_photo(
full.full_chat.chat_photo, file,
date=None, progress_callback=None,
thumb=-1 if download_big else 0
)
else:
# Until there's a report for chats, no need to.
return None | Downloads the profile photo of the given entity (user/chat/channel).
Args:
entity (`entity`):
From who the photo will be downloaded.
.. note::
This method expects the full entity (which has the data
to download the photo), not an input variant.
It's possible that sometimes you can't fetch the entity
from its input (since you can get errors like
``ChannelPrivateError``) but you already have it through
another call, like getting a forwarded message from it.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If file is the type `bytes`, it will be downloaded in-memory
as a bytestring (e.g. ``file=bytes``).
download_big (`bool`, optional):
Whether to use the big version of the available photos.
Returns:
``None`` if no photo was provided, or if it was Empty. On success
the file path is returned since it may differ from the one given. | Below is the the instruction that describes the task:
### Input:
Downloads the profile photo of the given entity (user/chat/channel).
Args:
entity (`entity`):
From who the photo will be downloaded.
.. note::
This method expects the full entity (which has the data
to download the photo), not an input variant.
It's possible that sometimes you can't fetch the entity
from its input (since you can get errors like
``ChannelPrivateError``) but you already have it through
another call, like getting a forwarded message from it.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If file is the type `bytes`, it will be downloaded in-memory
as a bytestring (e.g. ``file=bytes``).
download_big (`bool`, optional):
Whether to use the big version of the available photos.
Returns:
``None`` if no photo was provided, or if it was Empty. On success
the file path is returned since it may differ from the one given.
### Response:
async def download_profile_photo(
self, entity, file=None, *, download_big=True):
"""
Downloads the profile photo of the given entity (user/chat/channel).
Args:
entity (`entity`):
From who the photo will be downloaded.
.. note::
This method expects the full entity (which has the data
to download the photo), not an input variant.
It's possible that sometimes you can't fetch the entity
from its input (since you can get errors like
``ChannelPrivateError``) but you already have it through
another call, like getting a forwarded message from it.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If file is the type `bytes`, it will be downloaded in-memory
as a bytestring (e.g. ``file=bytes``).
download_big (`bool`, optional):
Whether to use the big version of the available photos.
Returns:
``None`` if no photo was provided, or if it was Empty. On success
the file path is returned since it may differ from the one given.
"""
# hex(crc32(x.encode('ascii'))) for x in
# ('User', 'Chat', 'UserFull', 'ChatFull')
ENTITIES = (0x2da17977, 0xc5af5d94, 0x1f4661b9, 0xd49a2697)
# ('InputPeer', 'InputUser', 'InputChannel')
INPUTS = (0xc91c90b6, 0xe669bf46, 0x40f202fd)
if not isinstance(entity, TLObject) or entity.SUBCLASS_OF_ID in INPUTS:
entity = await self.get_entity(entity)
possible_names = []
if entity.SUBCLASS_OF_ID not in ENTITIES:
photo = entity
else:
if not hasattr(entity, 'photo'):
# Special case: may be a ChatFull with photo:Photo
# This is different from a normal UserProfilePhoto and Chat
if not hasattr(entity, 'chat_photo'):
return None
return await self._download_photo(
entity.chat_photo, file, date=None, progress_callback=None)
for attr in ('username', 'first_name', 'title'):
possible_names.append(getattr(entity, attr, None))
photo = entity.photo
if isinstance(photo, (types.UserProfilePhoto, types.ChatPhoto)):
dc_id = photo.dc_id
which = photo.photo_big if download_big else photo.photo_small
loc = types.InputPeerPhotoFileLocation(
peer=await self.get_input_entity(entity),
local_id=which.local_id,
volume_id=which.volume_id,
big=download_big
)
else:
# It doesn't make any sense to check if `photo` can be used
# as input location, because then this method would be able
# to "download the profile photo of a message", i.e. its
# media which should be done with `download_media` instead.
return None
file = self._get_proper_filename(
file, 'profile_photo', '.jpg',
possible_names=possible_names
)
try:
result = await self.download_file(loc, file, dc_id=dc_id)
return result if file is bytes else file
except errors.LocationInvalidError:
# See issue #500, Android app fails as of v4.6.0 (1155).
# The fix seems to be using the full channel chat photo.
ie = await self.get_input_entity(entity)
if isinstance(ie, types.InputPeerChannel):
full = await self(functions.channels.GetFullChannelRequest(ie))
return await self._download_photo(
full.full_chat.chat_photo, file,
date=None, progress_callback=None,
thumb=-1 if download_big else 0
)
else:
# Until there's a report for chats, no need to.
return None |
def get_tensor_dependencies(tensor):
"""
Utility method to get all dependencies (including placeholders) of a tensor (backwards through the graph).
Args:
tensor (tf.Tensor): The input tensor.
Returns: Set of all dependencies (including needed placeholders) for the input tensor.
"""
dependencies = set()
dependencies.update(tensor.op.inputs)
for sub_op in tensor.op.inputs:
dependencies.update(get_tensor_dependencies(sub_op))
return dependencies | Utility method to get all dependencies (including placeholders) of a tensor (backwards through the graph).
Args:
tensor (tf.Tensor): The input tensor.
Returns: Set of all dependencies (including needed placeholders) for the input tensor. | Below is the the instruction that describes the task:
### Input:
Utility method to get all dependencies (including placeholders) of a tensor (backwards through the graph).
Args:
tensor (tf.Tensor): The input tensor.
Returns: Set of all dependencies (including needed placeholders) for the input tensor.
### Response:
def get_tensor_dependencies(tensor):
"""
Utility method to get all dependencies (including placeholders) of a tensor (backwards through the graph).
Args:
tensor (tf.Tensor): The input tensor.
Returns: Set of all dependencies (including needed placeholders) for the input tensor.
"""
dependencies = set()
dependencies.update(tensor.op.inputs)
for sub_op in tensor.op.inputs:
dependencies.update(get_tensor_dependencies(sub_op))
return dependencies |
def DbGetClassAttributeProperty(self, argin):
""" Get Tango class property(ies) value
:param argin: Str[0] = Tango class name
Str[1] = Attribute name
Str[n] = Attribute name
:type: tango.DevVarStringArray
:return: Str[0] = Tango class name
Str[1] = Attribute property number
Str[2] = Attribute property 1 name
Str[3] = Attribute property 1 value
Str[n + 1] = Attribute property 2 name
Str[n + 2] = Attribute property 2 value
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetClassAttributeProperty()")
class_name = argin[0]
return self.db.get_class_attribute_property(class_name, argin[1:]) | Get Tango class property(ies) value
:param argin: Str[0] = Tango class name
Str[1] = Attribute name
Str[n] = Attribute name
:type: tango.DevVarStringArray
:return: Str[0] = Tango class name
Str[1] = Attribute property number
Str[2] = Attribute property 1 name
Str[3] = Attribute property 1 value
Str[n + 1] = Attribute property 2 name
Str[n + 2] = Attribute property 2 value
:rtype: tango.DevVarStringArray | Below is the the instruction that describes the task:
### Input:
Get Tango class property(ies) value
:param argin: Str[0] = Tango class name
Str[1] = Attribute name
Str[n] = Attribute name
:type: tango.DevVarStringArray
:return: Str[0] = Tango class name
Str[1] = Attribute property number
Str[2] = Attribute property 1 name
Str[3] = Attribute property 1 value
Str[n + 1] = Attribute property 2 name
Str[n + 2] = Attribute property 2 value
:rtype: tango.DevVarStringArray
### Response:
def DbGetClassAttributeProperty(self, argin):
""" Get Tango class property(ies) value
:param argin: Str[0] = Tango class name
Str[1] = Attribute name
Str[n] = Attribute name
:type: tango.DevVarStringArray
:return: Str[0] = Tango class name
Str[1] = Attribute property number
Str[2] = Attribute property 1 name
Str[3] = Attribute property 1 value
Str[n + 1] = Attribute property 2 name
Str[n + 2] = Attribute property 2 value
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetClassAttributeProperty()")
class_name = argin[0]
return self.db.get_class_attribute_property(class_name, argin[1:]) |
def prepare_mainsubstituter():
"""Prepare and return a |Substituter| object for the main `__init__`
file of *HydPy*."""
substituter = Substituter()
for module in (builtins, numpy, datetime, unittest, doctest, inspect, io,
os, sys, time, collections, itertools, subprocess, scipy,
typing):
substituter.add_module(module)
for subpackage in (auxs, core, cythons, exe):
for dummy, name, dummy in pkgutil.walk_packages(subpackage.__path__):
full_name = subpackage.__name__ + '.' + name
substituter.add_module(importlib.import_module(full_name))
substituter.add_modules(models)
for cymodule in (annutils, smoothutils, pointerutils):
substituter.add_module(cymodule, cython=True)
substituter._short2long['|pub|'] = ':mod:`~hydpy.pub`'
substituter._short2long['|config|'] = ':mod:`~hydpy.config`'
return substituter | Prepare and return a |Substituter| object for the main `__init__`
file of *HydPy*. | Below is the the instruction that describes the task:
### Input:
Prepare and return a |Substituter| object for the main `__init__`
file of *HydPy*.
### Response:
def prepare_mainsubstituter():
"""Prepare and return a |Substituter| object for the main `__init__`
file of *HydPy*."""
substituter = Substituter()
for module in (builtins, numpy, datetime, unittest, doctest, inspect, io,
os, sys, time, collections, itertools, subprocess, scipy,
typing):
substituter.add_module(module)
for subpackage in (auxs, core, cythons, exe):
for dummy, name, dummy in pkgutil.walk_packages(subpackage.__path__):
full_name = subpackage.__name__ + '.' + name
substituter.add_module(importlib.import_module(full_name))
substituter.add_modules(models)
for cymodule in (annutils, smoothutils, pointerutils):
substituter.add_module(cymodule, cython=True)
substituter._short2long['|pub|'] = ':mod:`~hydpy.pub`'
substituter._short2long['|config|'] = ':mod:`~hydpy.config`'
return substituter |
def rgba_to_int(cls, red, green, blue, alpha):
"""
Encodes the color as an Integer in RGBA encoding
Returns None if any of red, green or blue are None.
If alpha is None we use 255 by default.
:return: Integer
:rtype: int
"""
red = unwrap(red)
green = unwrap(green)
blue = unwrap(blue)
alpha = unwrap(alpha)
if red is None or green is None or blue is None:
return None
if alpha is None:
alpha = 255
r = red << 24
g = green << 16
b = blue << 8
a = alpha << 0
rgba_int = r+g+b+a
if (rgba_int > (2**31-1)): # convert to signed 32-bit int
rgba_int = rgba_int - 2**32
return rgba_int | Encodes the color as an Integer in RGBA encoding
Returns None if any of red, green or blue are None.
If alpha is None we use 255 by default.
:return: Integer
:rtype: int | Below is the the instruction that describes the task:
### Input:
Encodes the color as an Integer in RGBA encoding
Returns None if any of red, green or blue are None.
If alpha is None we use 255 by default.
:return: Integer
:rtype: int
### Response:
def rgba_to_int(cls, red, green, blue, alpha):
"""
Encodes the color as an Integer in RGBA encoding
Returns None if any of red, green or blue are None.
If alpha is None we use 255 by default.
:return: Integer
:rtype: int
"""
red = unwrap(red)
green = unwrap(green)
blue = unwrap(blue)
alpha = unwrap(alpha)
if red is None or green is None or blue is None:
return None
if alpha is None:
alpha = 255
r = red << 24
g = green << 16
b = blue << 8
a = alpha << 0
rgba_int = r+g+b+a
if (rgba_int > (2**31-1)): # convert to signed 32-bit int
rgba_int = rgba_int - 2**32
return rgba_int |
def detach_remote(self, id, name):
"""
destroy remote instance of widget
Arguments:
- id (str): widget id
- name (str): widget type name
"""
if name in self.widgets:
if id in self.widgets[name]:
del self.widgets[name] | destroy remote instance of widget
Arguments:
- id (str): widget id
- name (str): widget type name | Below is the the instruction that describes the task:
### Input:
destroy remote instance of widget
Arguments:
- id (str): widget id
- name (str): widget type name
### Response:
def detach_remote(self, id, name):
"""
destroy remote instance of widget
Arguments:
- id (str): widget id
- name (str): widget type name
"""
if name in self.widgets:
if id in self.widgets[name]:
del self.widgets[name] |
def recv(self):
"""Non-blocking network receive.
Return list of (response, future) tuples
"""
responses = self._recv()
if not responses and self.requests_timed_out():
log.warning('%s timed out after %s ms. Closing connection.',
self, self.config['request_timeout_ms'])
self.close(error=Errors.RequestTimedOutError(
'Request timed out after %s ms' %
self.config['request_timeout_ms']))
return ()
# augment respones w/ correlation_id, future, and timestamp
for i, (correlation_id, response) in enumerate(responses):
try:
with self._lock:
(future, timestamp) = self.in_flight_requests.pop(correlation_id)
except KeyError:
self.close(Errors.KafkaConnectionError('Received unrecognized correlation id'))
return ()
latency_ms = (time.time() - timestamp) * 1000
if self._sensors:
self._sensors.request_time.record(latency_ms)
log.debug('%s Response %d (%s ms): %s', self, correlation_id, latency_ms, response)
responses[i] = (response, future)
return responses | Non-blocking network receive.
Return list of (response, future) tuples | Below is the the instruction that describes the task:
### Input:
Non-blocking network receive.
Return list of (response, future) tuples
### Response:
def recv(self):
"""Non-blocking network receive.
Return list of (response, future) tuples
"""
responses = self._recv()
if not responses and self.requests_timed_out():
log.warning('%s timed out after %s ms. Closing connection.',
self, self.config['request_timeout_ms'])
self.close(error=Errors.RequestTimedOutError(
'Request timed out after %s ms' %
self.config['request_timeout_ms']))
return ()
# augment respones w/ correlation_id, future, and timestamp
for i, (correlation_id, response) in enumerate(responses):
try:
with self._lock:
(future, timestamp) = self.in_flight_requests.pop(correlation_id)
except KeyError:
self.close(Errors.KafkaConnectionError('Received unrecognized correlation id'))
return ()
latency_ms = (time.time() - timestamp) * 1000
if self._sensors:
self._sensors.request_time.record(latency_ms)
log.debug('%s Response %d (%s ms): %s', self, correlation_id, latency_ms, response)
responses[i] = (response, future)
return responses |
def _first(self, **spec):
""" Get the earliest entry in this category, optionally including subcategories """
for record in self._entries(spec).order_by(model.Entry.local_date,
model.Entry.id)[:1]:
return entry.Entry(record)
return None | Get the earliest entry in this category, optionally including subcategories | Below is the the instruction that describes the task:
### Input:
Get the earliest entry in this category, optionally including subcategories
### Response:
def _first(self, **spec):
""" Get the earliest entry in this category, optionally including subcategories """
for record in self._entries(spec).order_by(model.Entry.local_date,
model.Entry.id)[:1]:
return entry.Entry(record)
return None |
def frame_at_coordinates_jit(coordinates, mask, mask_index_array, psf):
""" Compute the frame (indexes of pixels light is blurred into) and psf_frame (psf kernel values of those \
pixels) for a given coordinate in a masks and its PSF.
Parameters
----------
coordinates: (int, int)
The coordinates of mask_index_array on which the frame should be centred
psf_shape: (int, int)
The shape of the psf for which this frame will be used
"""
psf_shape = psf.shape
psf_max_size = psf_shape[0] * psf_shape[1]
half_x = int(psf_shape[0] / 2)
half_y = int(psf_shape[1] / 2)
frame = -1 * np.ones((psf_max_size))
psf_frame = -1.0 * np.ones((psf_max_size))
count = 0
for i in range(psf_shape[0]):
for j in range(psf_shape[1]):
x = coordinates[0] - half_x + i
y = coordinates[1] - half_y + j
if 0 <= x < mask_index_array.shape[0] and 0 <= y < mask_index_array.shape[1]:
value = mask_index_array[x, y]
if value >= 0 and not mask[x, y]:
frame[count] = value
psf_frame[count] = psf[i, j]
count += 1
return frame, psf_frame | Compute the frame (indexes of pixels light is blurred into) and psf_frame (psf kernel values of those \
pixels) for a given coordinate in a masks and its PSF.
Parameters
----------
coordinates: (int, int)
The coordinates of mask_index_array on which the frame should be centred
psf_shape: (int, int)
The shape of the psf for which this frame will be used | Below is the the instruction that describes the task:
### Input:
Compute the frame (indexes of pixels light is blurred into) and psf_frame (psf kernel values of those \
pixels) for a given coordinate in a masks and its PSF.
Parameters
----------
coordinates: (int, int)
The coordinates of mask_index_array on which the frame should be centred
psf_shape: (int, int)
The shape of the psf for which this frame will be used
### Response:
def frame_at_coordinates_jit(coordinates, mask, mask_index_array, psf):
""" Compute the frame (indexes of pixels light is blurred into) and psf_frame (psf kernel values of those \
pixels) for a given coordinate in a masks and its PSF.
Parameters
----------
coordinates: (int, int)
The coordinates of mask_index_array on which the frame should be centred
psf_shape: (int, int)
The shape of the psf for which this frame will be used
"""
psf_shape = psf.shape
psf_max_size = psf_shape[0] * psf_shape[1]
half_x = int(psf_shape[0] / 2)
half_y = int(psf_shape[1] / 2)
frame = -1 * np.ones((psf_max_size))
psf_frame = -1.0 * np.ones((psf_max_size))
count = 0
for i in range(psf_shape[0]):
for j in range(psf_shape[1]):
x = coordinates[0] - half_x + i
y = coordinates[1] - half_y + j
if 0 <= x < mask_index_array.shape[0] and 0 <= y < mask_index_array.shape[1]:
value = mask_index_array[x, y]
if value >= 0 and not mask[x, y]:
frame[count] = value
psf_frame[count] = psf[i, j]
count += 1
return frame, psf_frame |
def create_soil(phi=0.0, cohesion=0.0, unit_dry_weight=0.0, pw=9800):
"""
Can define a Soil object.
:param phi: Internal friction angle
:param cohesion: Cohesion of soil
:param unit_dry_weight: The dry unit weight of the soil.
:param pw: specific weight of water
:return: A Soil object.
"""
soil = Soil(pw=pw)
soil.phi = phi
soil.cohesion = cohesion
soil.unit_dry_weight = unit_dry_weight
return soil | Can define a Soil object.
:param phi: Internal friction angle
:param cohesion: Cohesion of soil
:param unit_dry_weight: The dry unit weight of the soil.
:param pw: specific weight of water
:return: A Soil object. | Below is the the instruction that describes the task:
### Input:
Can define a Soil object.
:param phi: Internal friction angle
:param cohesion: Cohesion of soil
:param unit_dry_weight: The dry unit weight of the soil.
:param pw: specific weight of water
:return: A Soil object.
### Response:
def create_soil(phi=0.0, cohesion=0.0, unit_dry_weight=0.0, pw=9800):
"""
Can define a Soil object.
:param phi: Internal friction angle
:param cohesion: Cohesion of soil
:param unit_dry_weight: The dry unit weight of the soil.
:param pw: specific weight of water
:return: A Soil object.
"""
soil = Soil(pw=pw)
soil.phi = phi
soil.cohesion = cohesion
soil.unit_dry_weight = unit_dry_weight
return soil |
def load_synapses(path=HOME + "/Downloads/pinky100_final.df",
scaling=(1, 1, 1)):
""" Test scenario using real synapses """
scaling = np.array(list(scaling))
df = pd.read_csv(path)
locs = np.array(df[["presyn_x", "centroid_x", "postsyn_x"]])
mask = ~np.any(np.isnan(locs), axis=1)
df = df[mask]
df['pre_pt.position'] = list((np.array(df[['presyn_x', 'presyn_y', 'presyn_z']]) / scaling).astype(np.int))
df['ctr_pt.position'] = list((np.array(df[['centroid_x', 'centroid_y', 'centroid_z']]) / scaling).astype(np.int))
df['post_pt.position'] = list((np.array(df[['postsyn_x', 'postsyn_y', 'postsyn_z']]) / scaling).astype(np.int))
df = df[['pre_pt.position', 'ctr_pt.position', 'post_pt.position', 'size']]
return df | Test scenario using real synapses | Below is the the instruction that describes the task:
### Input:
Test scenario using real synapses
### Response:
def load_synapses(path=HOME + "/Downloads/pinky100_final.df",
scaling=(1, 1, 1)):
""" Test scenario using real synapses """
scaling = np.array(list(scaling))
df = pd.read_csv(path)
locs = np.array(df[["presyn_x", "centroid_x", "postsyn_x"]])
mask = ~np.any(np.isnan(locs), axis=1)
df = df[mask]
df['pre_pt.position'] = list((np.array(df[['presyn_x', 'presyn_y', 'presyn_z']]) / scaling).astype(np.int))
df['ctr_pt.position'] = list((np.array(df[['centroid_x', 'centroid_y', 'centroid_z']]) / scaling).astype(np.int))
df['post_pt.position'] = list((np.array(df[['postsyn_x', 'postsyn_y', 'postsyn_z']]) / scaling).astype(np.int))
df = df[['pre_pt.position', 'ctr_pt.position', 'post_pt.position', 'size']]
return df |
def _on_client_disconnect(self, data):
"""Handle client disconnect."""
self._clients[data.get('id')].update_connected(False)
_LOGGER.info('client %s disconnected', self._clients[data.get('id')].friendly_name) | Handle client disconnect. | Below is the the instruction that describes the task:
### Input:
Handle client disconnect.
### Response:
def _on_client_disconnect(self, data):
"""Handle client disconnect."""
self._clients[data.get('id')].update_connected(False)
_LOGGER.info('client %s disconnected', self._clients[data.get('id')].friendly_name) |
def getHighOrderSequenceChunk(it, switchover=1000, w=40, n=2048):
"""
Given an iteration index, returns a list of vectors to be appended to the
input stream, as well as a string label identifying the sequence. This
version generates a bunch of high order sequences. The first element always
provides sufficient context to predict the rest of the elements.
After switchover iterations, it will generate a different set of sequences.
"""
if it%10==3:
s = numpy.random.randint(5)
if it <= switchover:
if s==0:
label="XABCDE"
elif s==1:
label="YCBEAF"
elif s==2:
label="GHIJKL"
elif s==3:
label="WABCMN"
else:
label="ZDBCAE"
else:
if s==0:
label="XCBEAF"
elif s==1:
label="YABCDE"
elif s==2:
label="GABCMN"
elif s==3:
label="WHIJKL"
else:
label="ZDHICF"
vecs = letterSequence(label)
else:
vecs= [getRandomVector(w, n)]
label="."
return vecs,label | Given an iteration index, returns a list of vectors to be appended to the
input stream, as well as a string label identifying the sequence. This
version generates a bunch of high order sequences. The first element always
provides sufficient context to predict the rest of the elements.
After switchover iterations, it will generate a different set of sequences. | Below is the the instruction that describes the task:
### Input:
Given an iteration index, returns a list of vectors to be appended to the
input stream, as well as a string label identifying the sequence. This
version generates a bunch of high order sequences. The first element always
provides sufficient context to predict the rest of the elements.
After switchover iterations, it will generate a different set of sequences.
### Response:
def getHighOrderSequenceChunk(it, switchover=1000, w=40, n=2048):
"""
Given an iteration index, returns a list of vectors to be appended to the
input stream, as well as a string label identifying the sequence. This
version generates a bunch of high order sequences. The first element always
provides sufficient context to predict the rest of the elements.
After switchover iterations, it will generate a different set of sequences.
"""
if it%10==3:
s = numpy.random.randint(5)
if it <= switchover:
if s==0:
label="XABCDE"
elif s==1:
label="YCBEAF"
elif s==2:
label="GHIJKL"
elif s==3:
label="WABCMN"
else:
label="ZDBCAE"
else:
if s==0:
label="XCBEAF"
elif s==1:
label="YABCDE"
elif s==2:
label="GABCMN"
elif s==3:
label="WHIJKL"
else:
label="ZDHICF"
vecs = letterSequence(label)
else:
vecs= [getRandomVector(w, n)]
label="."
return vecs,label |
def detect_erc20_unindexed_event_params(contract):
"""
Detect un-indexed ERC20 event parameters in a given contract.
:param contract: The contract to check ERC20 events for un-indexed parameters in.
:return: A list of tuple(event, parameter) of parameters which should be indexed.
"""
# Create our result array
results = []
# If this contract isn't an ERC20 token, we return our empty results.
if not contract.is_erc20():
return results
# Loop through all events to look for poor form.
for event in contract.events:
# Only handle events which are declared in this contract.
if event.contract != contract:
continue
# If this is transfer/approval events, expect the first two parameters to be indexed.
if event.full_name in ["Transfer(address,address,uint256)",
"Approval(address,address,uint256)"]:
if not event.elems[0].indexed:
results.append((event, event.elems[0]))
if not event.elems[1].indexed:
results.append((event, event.elems[1]))
# Return the results.
return results | Detect un-indexed ERC20 event parameters in a given contract.
:param contract: The contract to check ERC20 events for un-indexed parameters in.
:return: A list of tuple(event, parameter) of parameters which should be indexed. | Below is the the instruction that describes the task:
### Input:
Detect un-indexed ERC20 event parameters in a given contract.
:param contract: The contract to check ERC20 events for un-indexed parameters in.
:return: A list of tuple(event, parameter) of parameters which should be indexed.
### Response:
def detect_erc20_unindexed_event_params(contract):
"""
Detect un-indexed ERC20 event parameters in a given contract.
:param contract: The contract to check ERC20 events for un-indexed parameters in.
:return: A list of tuple(event, parameter) of parameters which should be indexed.
"""
# Create our result array
results = []
# If this contract isn't an ERC20 token, we return our empty results.
if not contract.is_erc20():
return results
# Loop through all events to look for poor form.
for event in contract.events:
# Only handle events which are declared in this contract.
if event.contract != contract:
continue
# If this is transfer/approval events, expect the first two parameters to be indexed.
if event.full_name in ["Transfer(address,address,uint256)",
"Approval(address,address,uint256)"]:
if not event.elems[0].indexed:
results.append((event, event.elems[0]))
if not event.elems[1].indexed:
results.append((event, event.elems[1]))
# Return the results.
return results |
def handle_m2m(self, sender, instance, **kwargs):
""" Handle many to many relationships """
self.handle_save(instance.__class__, instance) | Handle many to many relationships | Below is the the instruction that describes the task:
### Input:
Handle many to many relationships
### Response:
def handle_m2m(self, sender, instance, **kwargs):
""" Handle many to many relationships """
self.handle_save(instance.__class__, instance) |
def _extract_local_histogram(image, mask=slice(None), bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0):
"""
Internal, single-image version of @see local_histogram
Note: Values outside of the histograms range are not considered.
Note: Mode constant is not available, instead a mode "ignore" is provided.
Note: Default dtype of returned values is float.
"""
if "constant" == mode:
raise RuntimeError('boundary mode not supported')
elif "ignore" == mode:
mode = "constant"
if 'image' == rang:
rang = tuple(numpy.percentile(image[mask], cutoffp))
elif not 2 == len(rang):
raise RuntimeError('the rang must contain exactly two elements or the string "image"')
_, bin_edges = numpy.histogram([], bins=bins, range=rang)
output = _get_output(numpy.float if None == output else output, image, shape = [bins] + list(image.shape))
# threshold the image into the histogram bins represented by the output images first dimension, treat last bin separately, since upper border is inclusive
for i in range(bins - 1):
output[i] = (image >= bin_edges[i]) & (image < bin_edges[i + 1])
output[-1] = (image >= bin_edges[-2]) & (image <= bin_edges[-1])
# apply the sum filter to each dimension, then normalize by dividing through the sum of elements in the bins of each histogram
for i in range(bins):
output[i] = sum_filter(output[i], size=size, footprint=footprint, output=None, mode=mode, cval=0.0, origin=origin)
divident = numpy.sum(output, 0)
divident[0 == divident] = 1
output /= divident
# Notes on modes:
# mode=constant with a cval outside histogram range for the histogram equals a mode=constant with a cval = 0 for the sum_filter
# mode=constant with a cval inside histogram range for the histogram has no equal for the sum_filter (and does not make much sense)
# mode=X for the histogram equals mode=X for the sum_filter
# treat as multi-spectral image which intensities to extracted
return _extract_feature(_extract_intensities, [h for h in output], mask) | Internal, single-image version of @see local_histogram
Note: Values outside of the histograms range are not considered.
Note: Mode constant is not available, instead a mode "ignore" is provided.
Note: Default dtype of returned values is float. | Below is the the instruction that describes the task:
### Input:
Internal, single-image version of @see local_histogram
Note: Values outside of the histograms range are not considered.
Note: Mode constant is not available, instead a mode "ignore" is provided.
Note: Default dtype of returned values is float.
### Response:
def _extract_local_histogram(image, mask=slice(None), bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0):
"""
Internal, single-image version of @see local_histogram
Note: Values outside of the histograms range are not considered.
Note: Mode constant is not available, instead a mode "ignore" is provided.
Note: Default dtype of returned values is float.
"""
if "constant" == mode:
raise RuntimeError('boundary mode not supported')
elif "ignore" == mode:
mode = "constant"
if 'image' == rang:
rang = tuple(numpy.percentile(image[mask], cutoffp))
elif not 2 == len(rang):
raise RuntimeError('the rang must contain exactly two elements or the string "image"')
_, bin_edges = numpy.histogram([], bins=bins, range=rang)
output = _get_output(numpy.float if None == output else output, image, shape = [bins] + list(image.shape))
# threshold the image into the histogram bins represented by the output images first dimension, treat last bin separately, since upper border is inclusive
for i in range(bins - 1):
output[i] = (image >= bin_edges[i]) & (image < bin_edges[i + 1])
output[-1] = (image >= bin_edges[-2]) & (image <= bin_edges[-1])
# apply the sum filter to each dimension, then normalize by dividing through the sum of elements in the bins of each histogram
for i in range(bins):
output[i] = sum_filter(output[i], size=size, footprint=footprint, output=None, mode=mode, cval=0.0, origin=origin)
divident = numpy.sum(output, 0)
divident[0 == divident] = 1
output /= divident
# Notes on modes:
# mode=constant with a cval outside histogram range for the histogram equals a mode=constant with a cval = 0 for the sum_filter
# mode=constant with a cval inside histogram range for the histogram has no equal for the sum_filter (and does not make much sense)
# mode=X for the histogram equals mode=X for the sum_filter
# treat as multi-spectral image which intensities to extracted
return _extract_feature(_extract_intensities, [h for h in output], mask) |
def _create_check_config(cls, options, use_defaults=True):
"""Create a `CheckConfiguration` object from `options`.
If `use_defaults`, any of the match options that are `None` will
be replaced with their default value and the default convention will be
set for the checked codes.
"""
checked_codes = None
if cls._has_exclusive_option(options) or use_defaults:
checked_codes = cls._get_checked_errors(options)
kwargs = dict(checked_codes=checked_codes)
for key in ('match', 'match_dir', 'ignore_decorators'):
kwargs[key] = getattr(cls, 'DEFAULT_{}_RE'.format(key.upper())) \
if getattr(options, key) is None and use_defaults \
else getattr(options, key)
return CheckConfiguration(**kwargs) | Create a `CheckConfiguration` object from `options`.
If `use_defaults`, any of the match options that are `None` will
be replaced with their default value and the default convention will be
set for the checked codes. | Below is the the instruction that describes the task:
### Input:
Create a `CheckConfiguration` object from `options`.
If `use_defaults`, any of the match options that are `None` will
be replaced with their default value and the default convention will be
set for the checked codes.
### Response:
def _create_check_config(cls, options, use_defaults=True):
"""Create a `CheckConfiguration` object from `options`.
If `use_defaults`, any of the match options that are `None` will
be replaced with their default value and the default convention will be
set for the checked codes.
"""
checked_codes = None
if cls._has_exclusive_option(options) or use_defaults:
checked_codes = cls._get_checked_errors(options)
kwargs = dict(checked_codes=checked_codes)
for key in ('match', 'match_dir', 'ignore_decorators'):
kwargs[key] = getattr(cls, 'DEFAULT_{}_RE'.format(key.upper())) \
if getattr(options, key) is None and use_defaults \
else getattr(options, key)
return CheckConfiguration(**kwargs) |
def _decode_subelements(self):
"""Decode the stanza subelements."""
for child in self._element:
if child.tag == self._show_tag:
self._show = child.text
elif child.tag == self._status_tag:
self._status = child.text
elif child.tag == self._priority_tag:
try:
self._priority = int(child.text.strip())
if self._priority < -128 or self._priority > 127:
raise ValueError
except ValueError:
raise BadRequestProtocolError(
"Presence priority not an integer") | Decode the stanza subelements. | Below is the the instruction that describes the task:
### Input:
Decode the stanza subelements.
### Response:
def _decode_subelements(self):
"""Decode the stanza subelements."""
for child in self._element:
if child.tag == self._show_tag:
self._show = child.text
elif child.tag == self._status_tag:
self._status = child.text
elif child.tag == self._priority_tag:
try:
self._priority = int(child.text.strip())
if self._priority < -128 or self._priority > 127:
raise ValueError
except ValueError:
raise BadRequestProtocolError(
"Presence priority not an integer") |
def clear_allow_repeat_items(self):
"""reset allow repeat itmes to default value"""
if (self.get_allow_repeat_items_metadata().is_read_only() or
self.get_allow_repeat_items_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['allowRepeatItems'] = \
bool(self._allow_repeat_items_metadata['default_boolean_values'][0]) | reset allow repeat itmes to default value | Below is the the instruction that describes the task:
### Input:
reset allow repeat itmes to default value
### Response:
def clear_allow_repeat_items(self):
"""reset allow repeat itmes to default value"""
if (self.get_allow_repeat_items_metadata().is_read_only() or
self.get_allow_repeat_items_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['allowRepeatItems'] = \
bool(self._allow_repeat_items_metadata['default_boolean_values'][0]) |
def select_directory(self):
"""Select directory"""
self.redirect_stdio.emit(False)
directory = getexistingdirectory(self.main, _("Select directory"),
getcwd_or_home())
if directory:
self.chdir(directory)
self.redirect_stdio.emit(True) | Select directory | Below is the the instruction that describes the task:
### Input:
Select directory
### Response:
def select_directory(self):
"""Select directory"""
self.redirect_stdio.emit(False)
directory = getexistingdirectory(self.main, _("Select directory"),
getcwd_or_home())
if directory:
self.chdir(directory)
self.redirect_stdio.emit(True) |
def rmon_event_entry_event_description(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon")
event_entry = ET.SubElement(rmon, "event-entry")
event_index_key = ET.SubElement(event_entry, "event-index")
event_index_key.text = kwargs.pop('event_index')
event_description = ET.SubElement(event_entry, "event-description")
event_description.text = kwargs.pop('event_description')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def rmon_event_entry_event_description(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon")
event_entry = ET.SubElement(rmon, "event-entry")
event_index_key = ET.SubElement(event_entry, "event-index")
event_index_key.text = kwargs.pop('event_index')
event_description = ET.SubElement(event_entry, "event-description")
event_description.text = kwargs.pop('event_description')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def delete(self, record_key):
''' a method to delete a file
:param record_key: string with name of file
:return: string reporting outcome
'''
title = '%s.delete' % self.__class__.__name__
# validate inputs
input_fields = {
'record_key': record_key
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# validate existence of file
file_id, parent_id = self._get_id(record_key)
if not file_id:
exit_msg = '%s does not exist.' % record_key
return exit_msg
# remove file
try:
self.drive.delete(fileId=file_id).execute()
except:
raise DriveConnectionError(title)
# determine file directory
current_dir = os.path.split(record_key)[0]
# remove empty parent folders
try:
while current_dir:
folder_id, parent_id = self._get_id(current_dir)
count = 0
for id, name, mimetype in self._list_directory(folder_id):
count += 1
break
if count:
self.drive.delete(fileId=folder_id).execute()
current_dir = os.path.split(current_dir)[0]
else:
break
except:
raise DriveConnectionError(title)
# return exit message
exit_msg = '%s has been deleted.' % record_key
return exit_msg | a method to delete a file
:param record_key: string with name of file
:return: string reporting outcome | Below is the the instruction that describes the task:
### Input:
a method to delete a file
:param record_key: string with name of file
:return: string reporting outcome
### Response:
def delete(self, record_key):
''' a method to delete a file
:param record_key: string with name of file
:return: string reporting outcome
'''
title = '%s.delete' % self.__class__.__name__
# validate inputs
input_fields = {
'record_key': record_key
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# validate existence of file
file_id, parent_id = self._get_id(record_key)
if not file_id:
exit_msg = '%s does not exist.' % record_key
return exit_msg
# remove file
try:
self.drive.delete(fileId=file_id).execute()
except:
raise DriveConnectionError(title)
# determine file directory
current_dir = os.path.split(record_key)[0]
# remove empty parent folders
try:
while current_dir:
folder_id, parent_id = self._get_id(current_dir)
count = 0
for id, name, mimetype in self._list_directory(folder_id):
count += 1
break
if count:
self.drive.delete(fileId=folder_id).execute()
current_dir = os.path.split(current_dir)[0]
else:
break
except:
raise DriveConnectionError(title)
# return exit message
exit_msg = '%s has been deleted.' % record_key
return exit_msg |
def generate2(func, args_gen, kw_gen=None, ntasks=None, ordered=True,
force_serial=False, use_pool=False, chunksize=None, nprocs=None,
progkw={}, nTasks=None, verbose=None):
r"""
Interfaces to either multiprocessing or futures.
Esentially maps ``args_gen`` onto ``func`` using pool.imap.
However, args_gen must be a tuple of args that will be unpacked and send to
the function. Thus, the function can take multiple args. Also specifing
keyword args is supported.
Useful for embarrassingly parallel loops. Currently does not work with
opencv3
CommandLine:
python -m utool.util_parallel generate2
Args:
func (function): live python function
args_gen (?):
kw_gen (None): (default = None)
ntasks (None): (default = None)
ordered (bool): (default = True)
force_serial (bool): (default = False)
verbose (bool): verbosity flag(default = None)
CommandLine:
python -m utool.util_parallel generate2
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> from utool.util_parallel import _kw_wrap_worker # NOQA
>>> import utool as ut
>>> args_gen = list(zip(range(10000)))
>>> kw_gen = [{}] * len(args_gen)
>>> func = ut.is_prime
>>> _ = list(generate2(func, args_gen))
>>> _ = list(generate2(func, args_gen, ordered=False))
>>> _ = list(generate2(func, args_gen, force_serial=True))
>>> _ = list(generate2(func, args_gen, use_pool=True))
>>> _ = list(generate2(func, args_gen, ordered=False, verbose=False))
Example0:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> #num = 8700 # parallel is slower for smaller numbers
>>> num = 500 # parallel has an initial (~.1 second startup overhead)
>>> print('TESTING SERIAL')
>>> func = ut.is_prime
>>> args_list = list(range(0, num))
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, num)), force_serial=True)
>>> flag_list0 = list(flag_generator0)
>>> print('TESTING PARALLEL')
>>> flag_generator1 = ut.generate2(ut.is_prime, zip(range(0, num)))
>>> flag_list1 = list(flag_generator1)
>>> print('ASSERTING')
>>> assert len(flag_list1) == num
>>> assert flag_list0 == flag_list1
Example1:
>>> # ENABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> import utool as ut
>>> print('TESTING SERIAL')
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, 1)))
>>> flag_list0 = list(flag_generator0)
>>> flag_generator1 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 1)))
>>> flag_list1 = list(flag_generator1)
>>> print('TESTING PARALLEL')
>>> flag_generator2 = ut.generate2(ut.is_prime, zip(range(0, 12)))
>>> flag_list2 = list(flag_generator2)
>>> flag_generator3 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 12)))
>>> flag_list3 = list(flag_generator3)
>>> print('flag_list0 = %r' % (flag_list0,))
>>> print('flag_list1 = %r' % (flag_list1,))
>>> print('flag_list2 = %r' % (flag_list1,))
>>> print('flag_list3 = %r' % (flag_list1,))
Example2:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> import vtool as vt
>>> #def gen_chip(tup):
>>> # import vtool as vt
>>> # cfpath, gfpath, bbox, theta, new_size, filter_list = tup
>>> # chipBGR = vt.compute_chip(gfpath, bbox, theta, new_size, filter_list)
>>> # height, width = chipBGR.shape[0:2]
>>> # vt.imwrite(cfpath, chipBGR)
>>> # return cfpath, width, height
>>> import utool as ut
>>> from ibeis.algo.preproc.preproc_chip import gen_chip
>>> #from ibeis.algo.preproc.preproc_feat import gen_feat_worker
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list]
>>> #arg_list3 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list1, 0))]
>>> #arg_list4 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list2, 0))]
>>> ut.remove_file_list(ut.get_list_column(arg_list2, 0))
>>> chips1 = [x for x in ut.generate2(gen_chip, arg_list1)]
>>> chips2 = [y for y in ut.generate2(gen_chip, arg_list2, force_serial=True)]
>>> #feats3 = [z for z in ut.generate2(gen_feat_worker, arg_list3)]
>>> #feats4 = [w for w in ut.generate2(gen_feat_worker, arg_list4)]
Example3:
>>> # DISABLE_DOCTEST
>>> # FAILING_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> # Extremely weird case: freezes only if dsize > (313, 313) AND __testwarp was called beforehand.
>>> # otherwise the parallel loop works fine. Could be an opencv 3.0.0-dev issue.
>>> import vtool as vt
>>> import utool as ut
>>> from ibeis.algo.preproc.preproc_chip import gen_chip
>>> import cv2
>>> from utool.util_parallel import __testwarp
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(vt.imread(fpath),) for fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(vt.imread(fpath),) for fpath in img_fpath_list]
>>> #new1 = [x for x in ut.generate2(__testwarp, arg_list1)]
>>> new1 = __testwarp(arg_list1[0])
>>> new2 = [y for y in ut.generate2(__testwarp, arg_list2, force_serial=False)]
>>> #print('new2 = %r' % (new2,))
#Example4:
# >>> # Freakin weird. When IBEIS Runs generate it doesn't close the processes
# >>> # UNSTABLE_DOCTEST
# >>> # python -m utool.util_parallel --test-generate:4
# >>> # Trying to see if we can recreate the problem where there are
# >>> # defunct python processes
# >>> import utool as ut
# >>> #num = 8700 # parallel is slower for smaller numbers
# >>> num = 70000 # parallel has an initial (~.1 second startup overhead)
# >>> print('TESTING PARALLEL')
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> import time
# >>> time.sleep(10)
"""
if verbose is None:
verbose = 2
if ntasks is None:
ntasks = nTasks
if ntasks is None:
try:
ntasks = len(args_gen)
except TypeError:
# Cast to a list
args_gen = list(args_gen)
ntasks = len(args_gen)
if ntasks == 1 or ntasks < __MIN_PARALLEL_TASKS__:
force_serial = True
if __FORCE_SERIAL__:
force_serial = __FORCE_SERIAL__
if ntasks == 0:
if verbose:
print('[ut.generate2] submitted 0 tasks')
raise StopIteration
if nprocs is None:
nprocs = min(ntasks, get_default_numprocs())
if nprocs == 1:
force_serial = True
if kw_gen is None:
kw_gen = [{}] * ntasks
if isinstance(kw_gen, dict):
# kw_gen can be a single dict applied to everything
kw_gen = [kw_gen] * ntasks
if force_serial:
for result in _generate_serial2(func, args_gen, kw_gen,
ntasks=ntasks, progkw=progkw,
verbose=verbose):
yield result
else:
if verbose:
gentype = 'mp' if use_pool else 'futures'
fmtstr = '[generate2] executing {} {} tasks using {} {} procs'
print(fmtstr.format(ntasks, get_funcname(func), nprocs, gentype))
if verbose > 1:
lbl = '(pargen) %s: ' % (get_funcname(func),)
progkw_ = dict(freq=None, bs=True, adjust=False, freq_est='absolute')
progkw_.update(progkw)
# print('progkw_.update = {!r}'.format(progkw_.update))
progpart = util_progress.ProgPartial(length=ntasks, lbl=lbl, **progkw_)
if use_pool:
# Use multiprocessing
if chunksize is None:
chunksize = max(min(4, ntasks), min(8, ntasks // (nprocs ** 2)))
try:
pool = multiprocessing.Pool(nprocs)
if ordered:
pmap_func = pool.imap
else:
pmap_func = pool.imap_unordered
wrapped_arg_gen = zip([func] * len(args_gen), args_gen, kw_gen)
res_gen = pmap_func(_kw_wrap_worker, wrapped_arg_gen,
chunksize)
if verbose > 1:
res_gen = progpart(res_gen)
for res in res_gen:
yield res
finally:
pool.close()
pool.join()
else:
# Use futures
executor = futures.ProcessPoolExecutor(nprocs)
try:
fs_list = [executor.submit(func, *a, **k)
for a, k in zip(args_gen, kw_gen)]
fs_gen = fs_list
if not ordered:
fs_gen = futures.as_completed(fs_gen)
if verbose > 1:
fs_gen = progpart(fs_gen)
for fs in fs_gen:
yield fs.result()
finally:
executor.shutdown(wait=True) | r"""
Interfaces to either multiprocessing or futures.
Esentially maps ``args_gen`` onto ``func`` using pool.imap.
However, args_gen must be a tuple of args that will be unpacked and send to
the function. Thus, the function can take multiple args. Also specifing
keyword args is supported.
Useful for embarrassingly parallel loops. Currently does not work with
opencv3
CommandLine:
python -m utool.util_parallel generate2
Args:
func (function): live python function
args_gen (?):
kw_gen (None): (default = None)
ntasks (None): (default = None)
ordered (bool): (default = True)
force_serial (bool): (default = False)
verbose (bool): verbosity flag(default = None)
CommandLine:
python -m utool.util_parallel generate2
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> from utool.util_parallel import _kw_wrap_worker # NOQA
>>> import utool as ut
>>> args_gen = list(zip(range(10000)))
>>> kw_gen = [{}] * len(args_gen)
>>> func = ut.is_prime
>>> _ = list(generate2(func, args_gen))
>>> _ = list(generate2(func, args_gen, ordered=False))
>>> _ = list(generate2(func, args_gen, force_serial=True))
>>> _ = list(generate2(func, args_gen, use_pool=True))
>>> _ = list(generate2(func, args_gen, ordered=False, verbose=False))
Example0:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> #num = 8700 # parallel is slower for smaller numbers
>>> num = 500 # parallel has an initial (~.1 second startup overhead)
>>> print('TESTING SERIAL')
>>> func = ut.is_prime
>>> args_list = list(range(0, num))
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, num)), force_serial=True)
>>> flag_list0 = list(flag_generator0)
>>> print('TESTING PARALLEL')
>>> flag_generator1 = ut.generate2(ut.is_prime, zip(range(0, num)))
>>> flag_list1 = list(flag_generator1)
>>> print('ASSERTING')
>>> assert len(flag_list1) == num
>>> assert flag_list0 == flag_list1
Example1:
>>> # ENABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> import utool as ut
>>> print('TESTING SERIAL')
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, 1)))
>>> flag_list0 = list(flag_generator0)
>>> flag_generator1 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 1)))
>>> flag_list1 = list(flag_generator1)
>>> print('TESTING PARALLEL')
>>> flag_generator2 = ut.generate2(ut.is_prime, zip(range(0, 12)))
>>> flag_list2 = list(flag_generator2)
>>> flag_generator3 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 12)))
>>> flag_list3 = list(flag_generator3)
>>> print('flag_list0 = %r' % (flag_list0,))
>>> print('flag_list1 = %r' % (flag_list1,))
>>> print('flag_list2 = %r' % (flag_list1,))
>>> print('flag_list3 = %r' % (flag_list1,))
Example2:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> import vtool as vt
>>> #def gen_chip(tup):
>>> # import vtool as vt
>>> # cfpath, gfpath, bbox, theta, new_size, filter_list = tup
>>> # chipBGR = vt.compute_chip(gfpath, bbox, theta, new_size, filter_list)
>>> # height, width = chipBGR.shape[0:2]
>>> # vt.imwrite(cfpath, chipBGR)
>>> # return cfpath, width, height
>>> import utool as ut
>>> from ibeis.algo.preproc.preproc_chip import gen_chip
>>> #from ibeis.algo.preproc.preproc_feat import gen_feat_worker
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list]
>>> #arg_list3 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list1, 0))]
>>> #arg_list4 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list2, 0))]
>>> ut.remove_file_list(ut.get_list_column(arg_list2, 0))
>>> chips1 = [x for x in ut.generate2(gen_chip, arg_list1)]
>>> chips2 = [y for y in ut.generate2(gen_chip, arg_list2, force_serial=True)]
>>> #feats3 = [z for z in ut.generate2(gen_feat_worker, arg_list3)]
>>> #feats4 = [w for w in ut.generate2(gen_feat_worker, arg_list4)]
Example3:
>>> # DISABLE_DOCTEST
>>> # FAILING_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> # Extremely weird case: freezes only if dsize > (313, 313) AND __testwarp was called beforehand.
>>> # otherwise the parallel loop works fine. Could be an opencv 3.0.0-dev issue.
>>> import vtool as vt
>>> import utool as ut
>>> from ibeis.algo.preproc.preproc_chip import gen_chip
>>> import cv2
>>> from utool.util_parallel import __testwarp
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(vt.imread(fpath),) for fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(vt.imread(fpath),) for fpath in img_fpath_list]
>>> #new1 = [x for x in ut.generate2(__testwarp, arg_list1)]
>>> new1 = __testwarp(arg_list1[0])
>>> new2 = [y for y in ut.generate2(__testwarp, arg_list2, force_serial=False)]
>>> #print('new2 = %r' % (new2,))
#Example4:
# >>> # Freakin weird. When IBEIS Runs generate it doesn't close the processes
# >>> # UNSTABLE_DOCTEST
# >>> # python -m utool.util_parallel --test-generate:4
# >>> # Trying to see if we can recreate the problem where there are
# >>> # defunct python processes
# >>> import utool as ut
# >>> #num = 8700 # parallel is slower for smaller numbers
# >>> num = 70000 # parallel has an initial (~.1 second startup overhead)
# >>> print('TESTING PARALLEL')
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> import time
# >>> time.sleep(10) | Below is the the instruction that describes the task:
### Input:
r"""
Interfaces to either multiprocessing or futures.
Esentially maps ``args_gen`` onto ``func`` using pool.imap.
However, args_gen must be a tuple of args that will be unpacked and send to
the function. Thus, the function can take multiple args. Also specifing
keyword args is supported.
Useful for embarrassingly parallel loops. Currently does not work with
opencv3
CommandLine:
python -m utool.util_parallel generate2
Args:
func (function): live python function
args_gen (?):
kw_gen (None): (default = None)
ntasks (None): (default = None)
ordered (bool): (default = True)
force_serial (bool): (default = False)
verbose (bool): verbosity flag(default = None)
CommandLine:
python -m utool.util_parallel generate2
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> from utool.util_parallel import _kw_wrap_worker # NOQA
>>> import utool as ut
>>> args_gen = list(zip(range(10000)))
>>> kw_gen = [{}] * len(args_gen)
>>> func = ut.is_prime
>>> _ = list(generate2(func, args_gen))
>>> _ = list(generate2(func, args_gen, ordered=False))
>>> _ = list(generate2(func, args_gen, force_serial=True))
>>> _ = list(generate2(func, args_gen, use_pool=True))
>>> _ = list(generate2(func, args_gen, ordered=False, verbose=False))
Example0:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> #num = 8700 # parallel is slower for smaller numbers
>>> num = 500 # parallel has an initial (~.1 second startup overhead)
>>> print('TESTING SERIAL')
>>> func = ut.is_prime
>>> args_list = list(range(0, num))
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, num)), force_serial=True)
>>> flag_list0 = list(flag_generator0)
>>> print('TESTING PARALLEL')
>>> flag_generator1 = ut.generate2(ut.is_prime, zip(range(0, num)))
>>> flag_list1 = list(flag_generator1)
>>> print('ASSERTING')
>>> assert len(flag_list1) == num
>>> assert flag_list0 == flag_list1
Example1:
>>> # ENABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> import utool as ut
>>> print('TESTING SERIAL')
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, 1)))
>>> flag_list0 = list(flag_generator0)
>>> flag_generator1 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 1)))
>>> flag_list1 = list(flag_generator1)
>>> print('TESTING PARALLEL')
>>> flag_generator2 = ut.generate2(ut.is_prime, zip(range(0, 12)))
>>> flag_list2 = list(flag_generator2)
>>> flag_generator3 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 12)))
>>> flag_list3 = list(flag_generator3)
>>> print('flag_list0 = %r' % (flag_list0,))
>>> print('flag_list1 = %r' % (flag_list1,))
>>> print('flag_list2 = %r' % (flag_list1,))
>>> print('flag_list3 = %r' % (flag_list1,))
Example2:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> import vtool as vt
>>> #def gen_chip(tup):
>>> # import vtool as vt
>>> # cfpath, gfpath, bbox, theta, new_size, filter_list = tup
>>> # chipBGR = vt.compute_chip(gfpath, bbox, theta, new_size, filter_list)
>>> # height, width = chipBGR.shape[0:2]
>>> # vt.imwrite(cfpath, chipBGR)
>>> # return cfpath, width, height
>>> import utool as ut
>>> from ibeis.algo.preproc.preproc_chip import gen_chip
>>> #from ibeis.algo.preproc.preproc_feat import gen_feat_worker
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list]
>>> #arg_list3 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list1, 0))]
>>> #arg_list4 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list2, 0))]
>>> ut.remove_file_list(ut.get_list_column(arg_list2, 0))
>>> chips1 = [x for x in ut.generate2(gen_chip, arg_list1)]
>>> chips2 = [y for y in ut.generate2(gen_chip, arg_list2, force_serial=True)]
>>> #feats3 = [z for z in ut.generate2(gen_feat_worker, arg_list3)]
>>> #feats4 = [w for w in ut.generate2(gen_feat_worker, arg_list4)]
Example3:
>>> # DISABLE_DOCTEST
>>> # FAILING_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> # Extremely weird case: freezes only if dsize > (313, 313) AND __testwarp was called beforehand.
>>> # otherwise the parallel loop works fine. Could be an opencv 3.0.0-dev issue.
>>> import vtool as vt
>>> import utool as ut
>>> from ibeis.algo.preproc.preproc_chip import gen_chip
>>> import cv2
>>> from utool.util_parallel import __testwarp
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(vt.imread(fpath),) for fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(vt.imread(fpath),) for fpath in img_fpath_list]
>>> #new1 = [x for x in ut.generate2(__testwarp, arg_list1)]
>>> new1 = __testwarp(arg_list1[0])
>>> new2 = [y for y in ut.generate2(__testwarp, arg_list2, force_serial=False)]
>>> #print('new2 = %r' % (new2,))
#Example4:
# >>> # Freakin weird. When IBEIS Runs generate it doesn't close the processes
# >>> # UNSTABLE_DOCTEST
# >>> # python -m utool.util_parallel --test-generate:4
# >>> # Trying to see if we can recreate the problem where there are
# >>> # defunct python processes
# >>> import utool as ut
# >>> #num = 8700 # parallel is slower for smaller numbers
# >>> num = 70000 # parallel has an initial (~.1 second startup overhead)
# >>> print('TESTING PARALLEL')
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> import time
# >>> time.sleep(10)
### Response:
def generate2(func, args_gen, kw_gen=None, ntasks=None, ordered=True,
force_serial=False, use_pool=False, chunksize=None, nprocs=None,
progkw={}, nTasks=None, verbose=None):
r"""
Interfaces to either multiprocessing or futures.
Esentially maps ``args_gen`` onto ``func`` using pool.imap.
However, args_gen must be a tuple of args that will be unpacked and send to
the function. Thus, the function can take multiple args. Also specifing
keyword args is supported.
Useful for embarrassingly parallel loops. Currently does not work with
opencv3
CommandLine:
python -m utool.util_parallel generate2
Args:
func (function): live python function
args_gen (?):
kw_gen (None): (default = None)
ntasks (None): (default = None)
ordered (bool): (default = True)
force_serial (bool): (default = False)
verbose (bool): verbosity flag(default = None)
CommandLine:
python -m utool.util_parallel generate2
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> from utool.util_parallel import _kw_wrap_worker # NOQA
>>> import utool as ut
>>> args_gen = list(zip(range(10000)))
>>> kw_gen = [{}] * len(args_gen)
>>> func = ut.is_prime
>>> _ = list(generate2(func, args_gen))
>>> _ = list(generate2(func, args_gen, ordered=False))
>>> _ = list(generate2(func, args_gen, force_serial=True))
>>> _ = list(generate2(func, args_gen, use_pool=True))
>>> _ = list(generate2(func, args_gen, ordered=False, verbose=False))
Example0:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> #num = 8700 # parallel is slower for smaller numbers
>>> num = 500 # parallel has an initial (~.1 second startup overhead)
>>> print('TESTING SERIAL')
>>> func = ut.is_prime
>>> args_list = list(range(0, num))
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, num)), force_serial=True)
>>> flag_list0 = list(flag_generator0)
>>> print('TESTING PARALLEL')
>>> flag_generator1 = ut.generate2(ut.is_prime, zip(range(0, num)))
>>> flag_list1 = list(flag_generator1)
>>> print('ASSERTING')
>>> assert len(flag_list1) == num
>>> assert flag_list0 == flag_list1
Example1:
>>> # ENABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> import utool as ut
>>> print('TESTING SERIAL')
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, 1)))
>>> flag_list0 = list(flag_generator0)
>>> flag_generator1 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 1)))
>>> flag_list1 = list(flag_generator1)
>>> print('TESTING PARALLEL')
>>> flag_generator2 = ut.generate2(ut.is_prime, zip(range(0, 12)))
>>> flag_list2 = list(flag_generator2)
>>> flag_generator3 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 12)))
>>> flag_list3 = list(flag_generator3)
>>> print('flag_list0 = %r' % (flag_list0,))
>>> print('flag_list1 = %r' % (flag_list1,))
>>> print('flag_list2 = %r' % (flag_list1,))
>>> print('flag_list3 = %r' % (flag_list1,))
Example2:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> import vtool as vt
>>> #def gen_chip(tup):
>>> # import vtool as vt
>>> # cfpath, gfpath, bbox, theta, new_size, filter_list = tup
>>> # chipBGR = vt.compute_chip(gfpath, bbox, theta, new_size, filter_list)
>>> # height, width = chipBGR.shape[0:2]
>>> # vt.imwrite(cfpath, chipBGR)
>>> # return cfpath, width, height
>>> import utool as ut
>>> from ibeis.algo.preproc.preproc_chip import gen_chip
>>> #from ibeis.algo.preproc.preproc_feat import gen_feat_worker
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list]
>>> #arg_list3 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list1, 0))]
>>> #arg_list4 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list2, 0))]
>>> ut.remove_file_list(ut.get_list_column(arg_list2, 0))
>>> chips1 = [x for x in ut.generate2(gen_chip, arg_list1)]
>>> chips2 = [y for y in ut.generate2(gen_chip, arg_list2, force_serial=True)]
>>> #feats3 = [z for z in ut.generate2(gen_feat_worker, arg_list3)]
>>> #feats4 = [w for w in ut.generate2(gen_feat_worker, arg_list4)]
Example3:
>>> # DISABLE_DOCTEST
>>> # FAILING_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> # Extremely weird case: freezes only if dsize > (313, 313) AND __testwarp was called beforehand.
>>> # otherwise the parallel loop works fine. Could be an opencv 3.0.0-dev issue.
>>> import vtool as vt
>>> import utool as ut
>>> from ibeis.algo.preproc.preproc_chip import gen_chip
>>> import cv2
>>> from utool.util_parallel import __testwarp
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(vt.imread(fpath),) for fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(vt.imread(fpath),) for fpath in img_fpath_list]
>>> #new1 = [x for x in ut.generate2(__testwarp, arg_list1)]
>>> new1 = __testwarp(arg_list1[0])
>>> new2 = [y for y in ut.generate2(__testwarp, arg_list2, force_serial=False)]
>>> #print('new2 = %r' % (new2,))
#Example4:
# >>> # Freakin weird. When IBEIS Runs generate it doesn't close the processes
# >>> # UNSTABLE_DOCTEST
# >>> # python -m utool.util_parallel --test-generate:4
# >>> # Trying to see if we can recreate the problem where there are
# >>> # defunct python processes
# >>> import utool as ut
# >>> #num = 8700 # parallel is slower for smaller numbers
# >>> num = 70000 # parallel has an initial (~.1 second startup overhead)
# >>> print('TESTING PARALLEL')
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> import time
# >>> time.sleep(10)
"""
if verbose is None:
verbose = 2
if ntasks is None:
ntasks = nTasks
if ntasks is None:
try:
ntasks = len(args_gen)
except TypeError:
# Cast to a list
args_gen = list(args_gen)
ntasks = len(args_gen)
if ntasks == 1 or ntasks < __MIN_PARALLEL_TASKS__:
force_serial = True
if __FORCE_SERIAL__:
force_serial = __FORCE_SERIAL__
if ntasks == 0:
if verbose:
print('[ut.generate2] submitted 0 tasks')
raise StopIteration
if nprocs is None:
nprocs = min(ntasks, get_default_numprocs())
if nprocs == 1:
force_serial = True
if kw_gen is None:
kw_gen = [{}] * ntasks
if isinstance(kw_gen, dict):
# kw_gen can be a single dict applied to everything
kw_gen = [kw_gen] * ntasks
if force_serial:
for result in _generate_serial2(func, args_gen, kw_gen,
ntasks=ntasks, progkw=progkw,
verbose=verbose):
yield result
else:
if verbose:
gentype = 'mp' if use_pool else 'futures'
fmtstr = '[generate2] executing {} {} tasks using {} {} procs'
print(fmtstr.format(ntasks, get_funcname(func), nprocs, gentype))
if verbose > 1:
lbl = '(pargen) %s: ' % (get_funcname(func),)
progkw_ = dict(freq=None, bs=True, adjust=False, freq_est='absolute')
progkw_.update(progkw)
# print('progkw_.update = {!r}'.format(progkw_.update))
progpart = util_progress.ProgPartial(length=ntasks, lbl=lbl, **progkw_)
if use_pool:
# Use multiprocessing
if chunksize is None:
chunksize = max(min(4, ntasks), min(8, ntasks // (nprocs ** 2)))
try:
pool = multiprocessing.Pool(nprocs)
if ordered:
pmap_func = pool.imap
else:
pmap_func = pool.imap_unordered
wrapped_arg_gen = zip([func] * len(args_gen), args_gen, kw_gen)
res_gen = pmap_func(_kw_wrap_worker, wrapped_arg_gen,
chunksize)
if verbose > 1:
res_gen = progpart(res_gen)
for res in res_gen:
yield res
finally:
pool.close()
pool.join()
else:
# Use futures
executor = futures.ProcessPoolExecutor(nprocs)
try:
fs_list = [executor.submit(func, *a, **k)
for a, k in zip(args_gen, kw_gen)]
fs_gen = fs_list
if not ordered:
fs_gen = futures.as_completed(fs_gen)
if verbose > 1:
fs_gen = progpart(fs_gen)
for fs in fs_gen:
yield fs.result()
finally:
executor.shutdown(wait=True) |
def render(self, context, instance, placeholder):
"""
Update the context with plugin's data
"""
context = super(CMSRandomEntriesPlugin, self).render(
context, instance, placeholder)
context['template_to_render'] = (str(instance.template_to_render) or
'zinnia/tags/entries_random.html')
return context | Update the context with plugin's data | Below is the the instruction that describes the task:
### Input:
Update the context with plugin's data
### Response:
def render(self, context, instance, placeholder):
"""
Update the context with plugin's data
"""
context = super(CMSRandomEntriesPlugin, self).render(
context, instance, placeholder)
context['template_to_render'] = (str(instance.template_to_render) or
'zinnia/tags/entries_random.html')
return context |
def _dissociate(self, c, context, hm, pool_id):
"""Remove a pool association"""
pool_name = self._pool_name(context, pool_id)
c.client.slb.service_group.update(pool_name, health_monitor="",
health_check_disable=True) | Remove a pool association | Below is the the instruction that describes the task:
### Input:
Remove a pool association
### Response:
def _dissociate(self, c, context, hm, pool_id):
"""Remove a pool association"""
pool_name = self._pool_name(context, pool_id)
c.client.slb.service_group.update(pool_name, health_monitor="",
health_check_disable=True) |
def _mom(self, keys, left, right, cache):
"""
Statistical moments.
Example:
>>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
[1. 0.5 0.3333 0.25 ]
>>> print(numpy.around(chaospy.Add(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(1, 1).mom([0, 1, 2, 3]), 4))
[1. 2. 4. 8.]
"""
if evaluation.get_dependencies(left, right):
raise evaluation.DependencyError(
"sum of dependent distributions not feasible: "
"{} and {}".format(left, right)
)
keys_ = numpy.mgrid[tuple(slice(0, key+1, 1) for key in keys)]
keys_ = keys_.reshape(len(self), -1)
if isinstance(left, Dist):
left = [
evaluation.evaluate_moment(left, key, cache=cache)
for key in keys_.T
]
else:
left = list(reversed(numpy.array(left).T**keys_.T))
if isinstance(right, Dist):
right = [
evaluation.evaluate_moment(right, key, cache=cache)
for key in keys_.T
]
else:
right = list(reversed(numpy.array(right).T**keys_.T))
out = numpy.zeros(keys.shape)
for idx in range(keys_.shape[1]):
key = keys_.T[idx]
coef = comb(keys.T, key)
out += coef*left[idx]*right[idx]*(key <= keys.T)
if len(self) > 1:
out = numpy.prod(out, 1)
return out | Statistical moments.
Example:
>>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
[1. 0.5 0.3333 0.25 ]
>>> print(numpy.around(chaospy.Add(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(1, 1).mom([0, 1, 2, 3]), 4))
[1. 2. 4. 8.] | Below is the the instruction that describes the task:
### Input:
Statistical moments.
Example:
>>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
[1. 0.5 0.3333 0.25 ]
>>> print(numpy.around(chaospy.Add(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(1, 1).mom([0, 1, 2, 3]), 4))
[1. 2. 4. 8.]
### Response:
def _mom(self, keys, left, right, cache):
"""
Statistical moments.
Example:
>>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
[1. 0.5 0.3333 0.25 ]
>>> print(numpy.around(chaospy.Add(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(1, 1).mom([0, 1, 2, 3]), 4))
[1. 2. 4. 8.]
"""
if evaluation.get_dependencies(left, right):
raise evaluation.DependencyError(
"sum of dependent distributions not feasible: "
"{} and {}".format(left, right)
)
keys_ = numpy.mgrid[tuple(slice(0, key+1, 1) for key in keys)]
keys_ = keys_.reshape(len(self), -1)
if isinstance(left, Dist):
left = [
evaluation.evaluate_moment(left, key, cache=cache)
for key in keys_.T
]
else:
left = list(reversed(numpy.array(left).T**keys_.T))
if isinstance(right, Dist):
right = [
evaluation.evaluate_moment(right, key, cache=cache)
for key in keys_.T
]
else:
right = list(reversed(numpy.array(right).T**keys_.T))
out = numpy.zeros(keys.shape)
for idx in range(keys_.shape[1]):
key = keys_.T[idx]
coef = comb(keys.T, key)
out += coef*left[idx]*right[idx]*(key <= keys.T)
if len(self) > 1:
out = numpy.prod(out, 1)
return out |
def download_file(file_id, file_name):
'''Download a file from UPLOAD_FOLDER'''
extracted_out_dir = os.path.join(app.config['UPLOAD_FOLDER'], file_id)
return send_file(os.path.join(extracted_out_dir, file_name)) | Download a file from UPLOAD_FOLDER | Below is the the instruction that describes the task:
### Input:
Download a file from UPLOAD_FOLDER
### Response:
def download_file(file_id, file_name):
'''Download a file from UPLOAD_FOLDER'''
extracted_out_dir = os.path.join(app.config['UPLOAD_FOLDER'], file_id)
return send_file(os.path.join(extracted_out_dir, file_name)) |
def errors(source, model, wcshelper):
"""
Convert pixel based errors into sky coord errors
Parameters
----------
source : :class:`AegeanTools.models.SimpleSource`
The source which was fit.
model : lmfit.Parameters
The model which was fit.
wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper`
WCS information.
Returns
-------
source : :class:`AegeanTools.models.SimpleSource`
The modified source obejct.
"""
# if the source wasn't fit then all errors are -1
if source.flags & (flags.NOTFIT | flags.FITERR):
source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK
source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK
return source
# copy the errors from the model
prefix = "c{0}_".format(source.source)
err_amp = model[prefix + 'amp'].stderr
xo, yo = model[prefix + 'xo'].value, model[prefix + 'yo'].value
err_xo = model[prefix + 'xo'].stderr
err_yo = model[prefix + 'yo'].stderr
sx, sy = model[prefix + 'sx'].value, model[prefix + 'sy'].value
err_sx = model[prefix + 'sx'].stderr
err_sy = model[prefix + 'sy'].stderr
theta = model[prefix + 'theta'].value
err_theta = model[prefix + 'theta'].stderr
source.err_peak_flux = err_amp
pix_errs = [err_xo, err_yo, err_sx, err_sy, err_theta]
log.debug("Pix errs: {0}".format(pix_errs))
ref = wcshelper.pix2sky([xo, yo])
# check to see if the reference position has a valid WCS coordinate
# It is possible for this to fail, even if the ra/dec conversion works elsewhere
if not all(np.isfinite(ref)):
source.flags |= flags.WCSERR
source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK
source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK
return source
# position errors
if model[prefix + 'xo'].vary and model[prefix + 'yo'].vary \
and all(np.isfinite([err_xo, err_yo])):
offset = wcshelper.pix2sky([xo + err_xo, yo + err_yo])
source.err_ra = gcd(ref[0], ref[1], offset[0], ref[1])
source.err_dec = gcd(ref[0], ref[1], ref[0], offset[1])
else:
source.err_ra = source.err_dec = -1
if model[prefix + 'theta'].vary and np.isfinite(err_theta):
# pa error
off1 = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))])
off2 = wcshelper.pix2sky(
[xo + sx * np.cos(np.radians(theta + err_theta)), yo + sy * np.sin(np.radians(theta + err_theta))])
source.err_pa = abs(bear(ref[0], ref[1], off1[0], off1[1]) - bear(ref[0], ref[1], off2[0], off2[1]))
else:
source.err_pa = ERR_MASK
if model[prefix + 'sx'].vary and model[prefix + 'sy'].vary \
and all(np.isfinite([err_sx, err_sy])):
# major axis error
ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))])
offset = wcshelper.pix2sky(
[xo + (sx + err_sx) * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))])
source.err_a = gcd(ref[0], ref[1], offset[0], offset[1]) * 3600
# minor axis error
ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta + 90)), yo + sy * np.sin(np.radians(theta + 90))])
offset = wcshelper.pix2sky(
[xo + sx * np.cos(np.radians(theta + 90)), yo + (sy + err_sy) * np.sin(np.radians(theta + 90))])
source.err_b = gcd(ref[0], ref[1], offset[0], offset[1]) * 3600
else:
source.err_a = source.err_b = ERR_MASK
sqerr = 0
sqerr += (source.err_peak_flux / source.peak_flux) ** 2 if source.err_peak_flux > 0 else 0
sqerr += (source.err_a / source.a) ** 2 if source.err_a > 0 else 0
sqerr += (source.err_b / source.b) ** 2 if source.err_b > 0 else 0
if sqerr == 0:
source.err_int_flux = ERR_MASK
else:
source.err_int_flux = abs(source.int_flux * np.sqrt(sqerr))
return source | Convert pixel based errors into sky coord errors
Parameters
----------
source : :class:`AegeanTools.models.SimpleSource`
The source which was fit.
model : lmfit.Parameters
The model which was fit.
wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper`
WCS information.
Returns
-------
source : :class:`AegeanTools.models.SimpleSource`
The modified source obejct. | Below is the the instruction that describes the task:
### Input:
Convert pixel based errors into sky coord errors
Parameters
----------
source : :class:`AegeanTools.models.SimpleSource`
The source which was fit.
model : lmfit.Parameters
The model which was fit.
wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper`
WCS information.
Returns
-------
source : :class:`AegeanTools.models.SimpleSource`
The modified source obejct.
### Response:
def errors(source, model, wcshelper):
"""
Convert pixel based errors into sky coord errors
Parameters
----------
source : :class:`AegeanTools.models.SimpleSource`
The source which was fit.
model : lmfit.Parameters
The model which was fit.
wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper`
WCS information.
Returns
-------
source : :class:`AegeanTools.models.SimpleSource`
The modified source obejct.
"""
# if the source wasn't fit then all errors are -1
if source.flags & (flags.NOTFIT | flags.FITERR):
source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK
source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK
return source
# copy the errors from the model
prefix = "c{0}_".format(source.source)
err_amp = model[prefix + 'amp'].stderr
xo, yo = model[prefix + 'xo'].value, model[prefix + 'yo'].value
err_xo = model[prefix + 'xo'].stderr
err_yo = model[prefix + 'yo'].stderr
sx, sy = model[prefix + 'sx'].value, model[prefix + 'sy'].value
err_sx = model[prefix + 'sx'].stderr
err_sy = model[prefix + 'sy'].stderr
theta = model[prefix + 'theta'].value
err_theta = model[prefix + 'theta'].stderr
source.err_peak_flux = err_amp
pix_errs = [err_xo, err_yo, err_sx, err_sy, err_theta]
log.debug("Pix errs: {0}".format(pix_errs))
ref = wcshelper.pix2sky([xo, yo])
# check to see if the reference position has a valid WCS coordinate
# It is possible for this to fail, even if the ra/dec conversion works elsewhere
if not all(np.isfinite(ref)):
source.flags |= flags.WCSERR
source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK
source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK
return source
# position errors
if model[prefix + 'xo'].vary and model[prefix + 'yo'].vary \
and all(np.isfinite([err_xo, err_yo])):
offset = wcshelper.pix2sky([xo + err_xo, yo + err_yo])
source.err_ra = gcd(ref[0], ref[1], offset[0], ref[1])
source.err_dec = gcd(ref[0], ref[1], ref[0], offset[1])
else:
source.err_ra = source.err_dec = -1
if model[prefix + 'theta'].vary and np.isfinite(err_theta):
# pa error
off1 = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))])
off2 = wcshelper.pix2sky(
[xo + sx * np.cos(np.radians(theta + err_theta)), yo + sy * np.sin(np.radians(theta + err_theta))])
source.err_pa = abs(bear(ref[0], ref[1], off1[0], off1[1]) - bear(ref[0], ref[1], off2[0], off2[1]))
else:
source.err_pa = ERR_MASK
if model[prefix + 'sx'].vary and model[prefix + 'sy'].vary \
and all(np.isfinite([err_sx, err_sy])):
# major axis error
ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))])
offset = wcshelper.pix2sky(
[xo + (sx + err_sx) * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))])
source.err_a = gcd(ref[0], ref[1], offset[0], offset[1]) * 3600
# minor axis error
ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta + 90)), yo + sy * np.sin(np.radians(theta + 90))])
offset = wcshelper.pix2sky(
[xo + sx * np.cos(np.radians(theta + 90)), yo + (sy + err_sy) * np.sin(np.radians(theta + 90))])
source.err_b = gcd(ref[0], ref[1], offset[0], offset[1]) * 3600
else:
source.err_a = source.err_b = ERR_MASK
sqerr = 0
sqerr += (source.err_peak_flux / source.peak_flux) ** 2 if source.err_peak_flux > 0 else 0
sqerr += (source.err_a / source.a) ** 2 if source.err_a > 0 else 0
sqerr += (source.err_b / source.b) ** 2 if source.err_b > 0 else 0
if sqerr == 0:
source.err_int_flux = ERR_MASK
else:
source.err_int_flux = abs(source.int_flux * np.sqrt(sqerr))
return source |
def get_by_name(self, name, namespace=None):
"""
name: can be either <namespace>/<dataset_name> or just <dataset_name>
namespace: if specified, will skip name parsing, defaults to current user's username
"""
if not namespace:
namespace, name = get_namespace_from_name(name)
if not namespace:
namespace = AuthConfigManager.get_access_token().username
try:
response = self.request('GET', '%s/%s/%s' % (self.url, namespace, name))
return Dataset.from_dict(response.json())
except NotFoundException:
return None | name: can be either <namespace>/<dataset_name> or just <dataset_name>
namespace: if specified, will skip name parsing, defaults to current user's username | Below is the the instruction that describes the task:
### Input:
name: can be either <namespace>/<dataset_name> or just <dataset_name>
namespace: if specified, will skip name parsing, defaults to current user's username
### Response:
def get_by_name(self, name, namespace=None):
"""
name: can be either <namespace>/<dataset_name> or just <dataset_name>
namespace: if specified, will skip name parsing, defaults to current user's username
"""
if not namespace:
namespace, name = get_namespace_from_name(name)
if not namespace:
namespace = AuthConfigManager.get_access_token().username
try:
response = self.request('GET', '%s/%s/%s' % (self.url, namespace, name))
return Dataset.from_dict(response.json())
except NotFoundException:
return None |
def load(self, filename):
"""
Load AEAD from a file.
@param filename: File to read AEAD from
@type filename: string
"""
aead_f = open(filename, "rb")
buf = aead_f.read(1024)
if buf.startswith(YHSM_AEAD_CRLF_File_Marker):
buf = YHSM_AEAD_File_Marker + buf[len(YHSM_AEAD_CRLF_File_Marker):]
if buf.startswith(YHSM_AEAD_File_Marker):
if buf[len(YHSM_AEAD_File_Marker)] == chr(1):
# version 1 format
fmt = "< I %is" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)
self.key_handle, self.nonce = struct.unpack_from(fmt, buf, len(YHSM_AEAD_File_Marker) + 1)
self.data = buf[len(YHSM_AEAD_File_Marker) + 1 + struct.calcsize(fmt):]
else:
raise pyhsm.exception.YHSM_Error('Unknown AEAD file format')
else:
# version 0 format, just AEAD data
self.data = buf[:pyhsm.defines.YSM_MAX_KEY_SIZE + pyhsm.defines.YSM_BLOCK_SIZE]
aead_f.close() | Load AEAD from a file.
@param filename: File to read AEAD from
@type filename: string | Below is the the instruction that describes the task:
### Input:
Load AEAD from a file.
@param filename: File to read AEAD from
@type filename: string
### Response:
def load(self, filename):
"""
Load AEAD from a file.
@param filename: File to read AEAD from
@type filename: string
"""
aead_f = open(filename, "rb")
buf = aead_f.read(1024)
if buf.startswith(YHSM_AEAD_CRLF_File_Marker):
buf = YHSM_AEAD_File_Marker + buf[len(YHSM_AEAD_CRLF_File_Marker):]
if buf.startswith(YHSM_AEAD_File_Marker):
if buf[len(YHSM_AEAD_File_Marker)] == chr(1):
# version 1 format
fmt = "< I %is" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)
self.key_handle, self.nonce = struct.unpack_from(fmt, buf, len(YHSM_AEAD_File_Marker) + 1)
self.data = buf[len(YHSM_AEAD_File_Marker) + 1 + struct.calcsize(fmt):]
else:
raise pyhsm.exception.YHSM_Error('Unknown AEAD file format')
else:
# version 0 format, just AEAD data
self.data = buf[:pyhsm.defines.YSM_MAX_KEY_SIZE + pyhsm.defines.YSM_BLOCK_SIZE]
aead_f.close() |
def diff(self, other=Index, paths=None, create_patch=False, **kwargs):
        """Creates diffs between two items being trees, trees and index or an
        index and the working tree. It will detect renames automatically.
        :param other:
            Is the item to compare us with.
            If None, we will be compared to the working tree.
            If Treeish, it will be compared against the respective tree
            If Index ( type ), it will be compared against the index.
            If git.NULL_TREE, it will compare against the empty tree.
            It defaults to Index to assure the method will not by-default fail
            on bare repositories.
        :param paths:
            is a list of paths or a single path to limit the diff to.
            It will only include at least one of the given path or paths.
        :param create_patch:
            If True, the returned Diff contains a detailed patch that if applied
            makes the self to other. Patches are somewhat costly as blobs have to be read
            and diffed.
        :param kwargs:
            Additional arguments passed to git-diff, such as
            R=True to swap both sides of the diff.
        :return: git.DiffIndex
        :note:
            On a bare repository, 'other' needs to be provided as Index or
            as Tree/Commit, or a git command error will occur"""
        args = []
        args.append("--abbrev=40") # we need full shas
        args.append("--full-index") # get full index paths, not only filenames
        args.append("-M") # check for renames, in both formats
        if create_patch:
            args.append("-p")
        else:
            args.append("--raw")
        # in any way, assure we don't see colored output,
        # fixes https://github.com/gitpython-developers/GitPython/issues/172
        args.append('--no-color')
        # Accept a single path as well as a list of paths.
        if paths is not None and not isinstance(paths, (tuple, list)):
            paths = [paths]
        diff_cmd = self.repo.git.diff
        if other is self.Index:
            args.insert(0, '--cached')
        elif other is NULL_TREE:
            args.insert(0, '-r') # recursive diff-tree
            args.insert(0, '--root')
            diff_cmd = self.repo.git.diff_tree
        elif other is not None:
            args.insert(0, '-r') # recursive diff-tree
            args.insert(0, other)
            diff_cmd = self.repo.git.diff_tree
        args.insert(0, self)
        # paths is list here or None
        if paths:
            args.append("--")
            args.extend(paths)
        # END paths handling
        # Stream the diff from the git process instead of buffering it all.
        kwargs['as_process'] = True
        proc = diff_cmd(*self._process_diff_args(args), **kwargs)
        # Pick the parser matching the requested output format (-p vs --raw).
        diff_method = (Diff._index_from_patch_format
                       if create_patch
                       else Diff._index_from_raw_format)
        index = diff_method(self.repo, proc)
        proc.wait()
        return index
index and the working tree. It will detect renames automatically.
:param other:
Is the item to compare us with.
If None, we will be compared to the working tree.
If Treeish, it will be compared against the respective tree
If Index ( type ), it will be compared against the index.
If git.NULL_TREE, it will compare against the empty tree.
It defaults to Index to assure the method will not by-default fail
on bare repositories.
:param paths:
is a list of paths or a single path to limit the diff to.
It will only include at least one of the given path or paths.
:param create_patch:
If True, the returned Diff contains a detailed patch that if applied
makes the self to other. Patches are somewhat costly as blobs have to be read
and diffed.
:param kwargs:
Additional arguments passed to git-diff, such as
R=True to swap both sides of the diff.
:return: git.DiffIndex
:note:
On a bare repository, 'other' needs to be provided as Index or as
        as Tree/Commit, or a git command error will occur | Below is the instruction that describes the task:
### Input:
Creates diffs between two items being trees, trees and index or an
index and the working tree. It will detect renames automatically.
:param other:
Is the item to compare us with.
If None, we will be compared to the working tree.
If Treeish, it will be compared against the respective tree
If Index ( type ), it will be compared against the index.
If git.NULL_TREE, it will compare against the empty tree.
It defaults to Index to assure the method will not by-default fail
on bare repositories.
:param paths:
is a list of paths or a single path to limit the diff to.
It will only include at least one of the given path or paths.
:param create_patch:
If True, the returned Diff contains a detailed patch that if applied
makes the self to other. Patches are somewhat costly as blobs have to be read
and diffed.
:param kwargs:
Additional arguments passed to git-diff, such as
R=True to swap both sides of the diff.
:return: git.DiffIndex
:note:
On a bare repository, 'other' needs to be provided as Index or as
as Tree/Commit, or a git command error will occur
### Response:
def diff(self, other=Index, paths=None, create_patch=False, **kwargs):
"""Creates diffs between two items being trees, trees and index or an
index and the working tree. It will detect renames automatically.
:param other:
Is the item to compare us with.
If None, we will be compared to the working tree.
If Treeish, it will be compared against the respective tree
If Index ( type ), it will be compared against the index.
If git.NULL_TREE, it will compare against the empty tree.
It defaults to Index to assure the method will not by-default fail
on bare repositories.
:param paths:
is a list of paths or a single path to limit the diff to.
It will only include at least one of the given path or paths.
:param create_patch:
If True, the returned Diff contains a detailed patch that if applied
makes the self to other. Patches are somewhat costly as blobs have to be read
and diffed.
:param kwargs:
Additional arguments passed to git-diff, such as
R=True to swap both sides of the diff.
:return: git.DiffIndex
:note:
On a bare repository, 'other' needs to be provided as Index or as
as Tree/Commit, or a git command error will occur"""
args = []
args.append("--abbrev=40") # we need full shas
args.append("--full-index") # get full index paths, not only filenames
args.append("-M") # check for renames, in both formats
if create_patch:
args.append("-p")
else:
args.append("--raw")
# in any way, assure we don't see colored output,
# fixes https://github.com/gitpython-developers/GitPython/issues/172
args.append('--no-color')
if paths is not None and not isinstance(paths, (tuple, list)):
paths = [paths]
diff_cmd = self.repo.git.diff
if other is self.Index:
args.insert(0, '--cached')
elif other is NULL_TREE:
args.insert(0, '-r') # recursive diff-tree
args.insert(0, '--root')
diff_cmd = self.repo.git.diff_tree
elif other is not None:
args.insert(0, '-r') # recursive diff-tree
args.insert(0, other)
diff_cmd = self.repo.git.diff_tree
args.insert(0, self)
# paths is list here or None
if paths:
args.append("--")
args.extend(paths)
# END paths handling
kwargs['as_process'] = True
proc = diff_cmd(*self._process_diff_args(args), **kwargs)
diff_method = (Diff._index_from_patch_format
if create_patch
else Diff._index_from_raw_format)
index = diff_method(self.repo, proc)
proc.wait()
return index |
def stringify_dict_contents(dct):
    """Turn dict keys and values into native strings."""
    converted = {}
    for key, value in dct.items():
        converted[str_if_nested_or_str(key)] = str_if_nested_or_str(value)
    return converted
### Input:
Turn dict keys and values into native strings.
### Response:
def stringify_dict_contents(dct):
"""Turn dict keys and values into native strings."""
return {
str_if_nested_or_str(k): str_if_nested_or_str(v)
for k, v in dct.items()
} |
def timestamp_any(value):
    """Convert a timestamp string to seconds since the epoch.

    Wraps ``dateutil.parser.parse`` so that most timestamp formats are
    accepted.  This is SLOW -- prefer the specialised ``timestamp_*()``
    helpers when the input format is known.

    Args:
        value: A timestamp string.

    Returns:
        The time in seconds since epoch as an integer.
    """
    parsed = dateutil.parser.parse(value)
    utc_struct = parsed.utctimetuple()
    return int(calendar.timegm(utc_struct))
Most timestamps strings are supported in fact this wraps the
dateutil.parser.parse() method. This is SLOW use the other timestamp_*()
functions if possible.
Args:
value: A timestamp string.
Returns:
        The time in seconds since epoch as an integer. | Below is the instruction that describes the task:
### Input:
Convert timestamp string to time in seconds since epoch.
Most timestamps strings are supported in fact this wraps the
dateutil.parser.parse() method. This is SLOW use the other timestamp_*()
functions if possible.
Args:
value: A timestamp string.
Returns:
The time in seconds since epoch as an integer.
### Response:
def timestamp_any(value):
"""Convert timestamp string to time in seconds since epoch.
Most timestamps strings are supported in fact this wraps the
dateutil.parser.parse() method. This is SLOW use the other timestamp_*()
functions if possible.
Args:
value: A timestamp string.
Returns:
The time in seconds since epoch as an integer.
"""
return int(calendar.timegm(dateutil.parser.parse(value).utctimetuple())) |
def get_certifi_file():
    """Locate the CA certificate bundle installed by the certifi package.

    @return: the filename to the cert file
    @rtype: string
    @raises: ImportError when certifi is not installed or ValueError when
    the file is not found
    """
    import certifi
    cert_path = certifi.where()
    if not os.path.isfile(cert_path):
        raise ValueError("%s not found; check your certifi installation" % cert_path)
    return cert_path
@return: the filename to the cert file
@rtype: string
@raises: ImportError when certifi is not installed or ValueError when
    the file is not found | Below is the instruction that describes the task:
### Input:
Get the SSL certifications installed by the certifi package.
@return: the filename to the cert file
@rtype: string
@raises: ImportError when certifi is not installed or ValueError when
the file is not found
### Response:
def get_certifi_file():
"""Get the SSL certifications installed by the certifi package.
@return: the filename to the cert file
@rtype: string
@raises: ImportError when certifi is not installed or ValueError when
the file is not found
"""
import certifi
filename = certifi.where()
if os.path.isfile(filename):
return filename
msg = "%s not found; check your certifi installation" % filename
raise ValueError(msg) |
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]:
'''Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
'''
modulenames = [] # type: Iterable[str]
for filename in filenames:
if not filename.endswith('.py'):
continue
name = filename
name = name.replace(filename_prefix, '')
name = name.replace('__init__.py', '')
name = name.replace('.py', '')
name = name.replace('/', '.')
name = name.strip('.')
if not len(name):
continue
if not modulename_prefix.endswith('.'):
modulename_prefix += '.'
name = modulename_prefix + name
known_symbols = set()
name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ])
if len(name):
modulenames.append(name)
return modulenames | Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
    A list of modulenames corresponding to all filenames (for legal module names). | Below is the instruction that describes the task:
### Input:
Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
### Response:
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]:
'''Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
'''
modulenames = [] # type: Iterable[str]
for filename in filenames:
if not filename.endswith('.py'):
continue
name = filename
name = name.replace(filename_prefix, '')
name = name.replace('__init__.py', '')
name = name.replace('.py', '')
name = name.replace('/', '.')
name = name.strip('.')
if not len(name):
continue
if not modulename_prefix.endswith('.'):
modulename_prefix += '.'
name = modulename_prefix + name
known_symbols = set()
name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ])
if len(name):
modulenames.append(name)
return modulenames |
def get_all_agile_boards(self, board_name=None, project_key=None, board_type=None, start=0, limit=50):
        """Fetch all agile boards the current user is allowed to view.

        :param board_name: optional board-name filter
        :param project_key: optional project key or id filter
        :param board_type: optional board-type filter
        :param start: index of the first board to return
        :param limit: maximum number of boards to return
        :return: the REST response for the board listing
        """
        params = {}
        # String filters are added only when truthy, mirroring the REST API
        # behaviour of omitting absent filters.
        for key, value in (('name', board_name),
                           ('projectKeyOrId', project_key),
                           ('type', board_type)):
            if value:
                params[key] = value
        if start:
            params['startAt'] = int(start)
        if limit:
            params['maxResults'] = int(limit)
        return self.get('rest/agile/1.0/board', params=params)
:param board_name:
:param project_key:
:param board_type:
:param start:
:param limit:
        :return: | Below is the instruction that describes the task:
### Input:
Returns all boards. This only includes boards that the user has permission to view.
:param board_name:
:param project_key:
:param board_type:
:param start:
:param limit:
:return:
### Response:
def get_all_agile_boards(self, board_name=None, project_key=None, board_type=None, start=0, limit=50):
"""
Returns all boards. This only includes boards that the user has permission to view.
:param board_name:
:param project_key:
:param board_type:
:param start:
:param limit:
:return:
"""
url = 'rest/agile/1.0/board'
params = {}
if board_name:
params['name'] = board_name
if project_key:
params['projectKeyOrId'] = project_key
if board_type:
params['type'] = board_type
if start:
params['startAt'] = int(start)
if limit:
params['maxResults'] = int(limit)
return self.get(url, params=params) |
def get_keywords(self):
        """Obtain the state of the dialog as a keywords dict.
        :returns: Keywords reflecting the state of the dialog.
        :rtype: dict
        """
        keywords = {}
        inasafe_fields = {}
        # Geometry is always derivable; every other key depends on what the
        # user selected in the corresponding wizard step.
        keywords['layer_geometry'] = self.get_layer_geometry_key()
        if self.step_kw_purpose.selected_purpose():
            keywords['layer_purpose'] = self.step_kw_purpose.\
                selected_purpose()['key']
        if self.step_kw_subcategory.selected_subcategory():
            key = self.step_kw_purpose.selected_purpose()['key']
            keywords[key] = self.step_kw_subcategory.\
                selected_subcategory()['key']
        if self.get_layer_geometry_key() == layer_geometry_raster['key']:
            if self.step_kw_band_selector.selected_band():
                keywords['active_band'] = self.step_kw_band_selector.\
                    selected_band()
        # NOTE(review): 'layer_purpose' is read unconditionally here -- if no
        # purpose was selected this raises KeyError; presumably the wizard
        # guarantees a selection by this point -- confirm.
        if keywords['layer_purpose'] == layer_purpose_hazard['key']:
            if self.step_kw_hazard_category.selected_hazard_category():
                keywords['hazard_category'] \
                    = self.step_kw_hazard_category.\
                    selected_hazard_category()['key']
        if self.step_kw_layermode.selected_layermode():
            keywords['layer_mode'] = self.step_kw_layermode.\
                selected_layermode()['key']
        if self.step_kw_unit.selected_unit():
            # The unit keyword name differs between hazard and exposure layers.
            if self.step_kw_purpose.selected_purpose() == layer_purpose_hazard:
                key = continuous_hazard_unit['key']
            else:
                key = exposure_unit['key']
            keywords[key] = self.step_kw_unit.selected_unit()['key']
        if self.step_kw_field.selected_fields():
            field_key = self.field_keyword_for_the_layer()
            inasafe_fields[field_key] = self.step_kw_field.selected_fields()
        if self.step_kw_classification.selected_classification():
            keywords['classification'] = self.step_kw_classification.\
                selected_classification()['key']
        # Hazard layers carry per-exposure classifications; other layers use
        # a single threshold list or value map depending on the layer mode.
        if keywords['layer_purpose'] == layer_purpose_hazard['key']:
            multi_classifications = self.step_kw_multi_classifications.\
                get_current_state()
            value_maps = multi_classifications.get('value_maps')
            if value_maps is not None:
                keywords['value_maps'] = value_maps
            thresholds = multi_classifications.get('thresholds')
            if thresholds is not None:
                keywords['thresholds'] = thresholds
        else:
            if self.step_kw_layermode.selected_layermode():
                layer_mode = self.step_kw_layermode.selected_layermode()
                if layer_mode == layer_mode_continuous:
                    thresholds = self.step_kw_threshold.get_threshold()
                    if thresholds:
                        keywords['thresholds'] = thresholds
                elif layer_mode == layer_mode_classified:
                    value_map = self.step_kw_classify.selected_mapping()
                    if value_map:
                        keywords['value_map'] = value_map
        # Source / citation metadata entered on the source step.
        if self.step_kw_source.leSource.text():
            keywords['source'] = self.step_kw_source.leSource.text()
        if self.step_kw_source.leSource_url.text():
            keywords['url'] = self.step_kw_source.leSource_url.text()
        if self.step_kw_source.leSource_scale.text():
            keywords['scale'] = self.step_kw_source.leSource_scale.text()
        if self.step_kw_source.ckbSource_date.isChecked():
            keywords['date'] = self.step_kw_source.dtSource_date.dateTime()
        if self.step_kw_source.leSource_license.text():
            keywords['license'] = self.step_kw_source.leSource_license.text()
        if self.step_kw_title.leTitle.text():
            keywords['title'] = self.step_kw_title.leTitle.text()
        # Merge the field mappings gathered by the individual steps; later
        # updates win on key collisions.
        inasafe_fields.update(self.step_kw_inasafe_fields.get_inasafe_fields())
        inasafe_fields.update(
            self.step_kw_default_inasafe_fields.get_inasafe_fields())
        inasafe_fields.update(
            self.step_kw_fields_mapping.get_field_mapping()['fields'])
        if inasafe_fields:
            keywords['inasafe_fields'] = inasafe_fields
        inasafe_default_values = {}
        if keywords['layer_geometry'] == layer_geometry_raster['key']:
            pass
            # Notes(IS): Skipped assigning raster inasafe default value for
            # now.
            # inasafe_default_values = self.\
            #     step_kw_inasafe_raster_default_values.\
            #     get_inasafe_default_values()
        else:
            inasafe_default_values.update(
                self.step_kw_default_inasafe_fields.get_inasafe_default_values(
                ))
            inasafe_default_values.update(
                self.step_kw_fields_mapping.get_field_mapping()['values'])
        if inasafe_default_values:
            keywords['inasafe_default_values'] = inasafe_default_values
        # Optional free-form extra keywords supported by some subcategories.
        if self.step_kw_subcategory.selected_subcategory():
            subcategory = self.step_kw_subcategory.selected_subcategory()
            if subcategory.get('extra_keywords'):
                extra_keywords = self.step_kw_extra_keywords.\
                    get_extra_keywords()
                if extra_keywords:
                    keywords['extra_keywords'] = extra_keywords
        return keywords
:returns: Keywords reflecting the state of the dialog.
        :rtype: dict | Below is the instruction that describes the task:
### Input:
Obtain the state of the dialog as a keywords dict.
:returns: Keywords reflecting the state of the dialog.
:rtype: dict
### Response:
def get_keywords(self):
"""Obtain the state of the dialog as a keywords dict.
:returns: Keywords reflecting the state of the dialog.
:rtype: dict
"""
keywords = {}
inasafe_fields = {}
keywords['layer_geometry'] = self.get_layer_geometry_key()
if self.step_kw_purpose.selected_purpose():
keywords['layer_purpose'] = self.step_kw_purpose.\
selected_purpose()['key']
if self.step_kw_subcategory.selected_subcategory():
key = self.step_kw_purpose.selected_purpose()['key']
keywords[key] = self.step_kw_subcategory.\
selected_subcategory()['key']
if self.get_layer_geometry_key() == layer_geometry_raster['key']:
if self.step_kw_band_selector.selected_band():
keywords['active_band'] = self.step_kw_band_selector.\
selected_band()
if keywords['layer_purpose'] == layer_purpose_hazard['key']:
if self.step_kw_hazard_category.selected_hazard_category():
keywords['hazard_category'] \
= self.step_kw_hazard_category.\
selected_hazard_category()['key']
if self.step_kw_layermode.selected_layermode():
keywords['layer_mode'] = self.step_kw_layermode.\
selected_layermode()['key']
if self.step_kw_unit.selected_unit():
if self.step_kw_purpose.selected_purpose() == layer_purpose_hazard:
key = continuous_hazard_unit['key']
else:
key = exposure_unit['key']
keywords[key] = self.step_kw_unit.selected_unit()['key']
if self.step_kw_field.selected_fields():
field_key = self.field_keyword_for_the_layer()
inasafe_fields[field_key] = self.step_kw_field.selected_fields()
if self.step_kw_classification.selected_classification():
keywords['classification'] = self.step_kw_classification.\
selected_classification()['key']
if keywords['layer_purpose'] == layer_purpose_hazard['key']:
multi_classifications = self.step_kw_multi_classifications.\
get_current_state()
value_maps = multi_classifications.get('value_maps')
if value_maps is not None:
keywords['value_maps'] = value_maps
thresholds = multi_classifications.get('thresholds')
if thresholds is not None:
keywords['thresholds'] = thresholds
else:
if self.step_kw_layermode.selected_layermode():
layer_mode = self.step_kw_layermode.selected_layermode()
if layer_mode == layer_mode_continuous:
thresholds = self.step_kw_threshold.get_threshold()
if thresholds:
keywords['thresholds'] = thresholds
elif layer_mode == layer_mode_classified:
value_map = self.step_kw_classify.selected_mapping()
if value_map:
keywords['value_map'] = value_map
if self.step_kw_source.leSource.text():
keywords['source'] = self.step_kw_source.leSource.text()
if self.step_kw_source.leSource_url.text():
keywords['url'] = self.step_kw_source.leSource_url.text()
if self.step_kw_source.leSource_scale.text():
keywords['scale'] = self.step_kw_source.leSource_scale.text()
if self.step_kw_source.ckbSource_date.isChecked():
keywords['date'] = self.step_kw_source.dtSource_date.dateTime()
if self.step_kw_source.leSource_license.text():
keywords['license'] = self.step_kw_source.leSource_license.text()
if self.step_kw_title.leTitle.text():
keywords['title'] = self.step_kw_title.leTitle.text()
inasafe_fields.update(self.step_kw_inasafe_fields.get_inasafe_fields())
inasafe_fields.update(
self.step_kw_default_inasafe_fields.get_inasafe_fields())
inasafe_fields.update(
self.step_kw_fields_mapping.get_field_mapping()['fields'])
if inasafe_fields:
keywords['inasafe_fields'] = inasafe_fields
inasafe_default_values = {}
if keywords['layer_geometry'] == layer_geometry_raster['key']:
pass
# Notes(IS): Skipped assigning raster inasafe default value for
# now.
# inasafe_default_values = self.\
# step_kw_inasafe_raster_default_values.\
# get_inasafe_default_values()
else:
inasafe_default_values.update(
self.step_kw_default_inasafe_fields.get_inasafe_default_values(
))
inasafe_default_values.update(
self.step_kw_fields_mapping.get_field_mapping()['values'])
if inasafe_default_values:
keywords['inasafe_default_values'] = inasafe_default_values
if self.step_kw_subcategory.selected_subcategory():
subcategory = self.step_kw_subcategory.selected_subcategory()
if subcategory.get('extra_keywords'):
extra_keywords = self.step_kw_extra_keywords.\
get_extra_keywords()
if extra_keywords:
keywords['extra_keywords'] = extra_keywords
return keywords |
def __populate_sections(self):
        """Populate the internal section map from the sections present in
        the BFD.

        Sections that fail to parse are skipped (best-effort behaviour).

        :raises BfdException: if the BFD pointer has not been initialized.
        """
        if not self._ptr:
            raise BfdException("BFD not initialized")
        for section in _bfd.get_sections_list(self._ptr):
            # `except X, err` is Python-2-only syntax and bound a never-used
            # name; the bare form below is valid on Python 2.6+ and 3.
            try:
                bfd_section = BfdSection(self._ptr, section)
                self._sections[bfd_section.name] = bfd_section
            except BfdSectionException:
                # Deliberately best-effort: a section that cannot be parsed
                # is skipped so the remaining sections still load.
                pass
        internal list. | Below is the instruction that describes the task:
### Input:
Get a list of the section present in the bfd to populate our
internal list.
### Response:
def __populate_sections(self):
"""Get a list of the section present in the bfd to populate our
internal list.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
for section in _bfd.get_sections_list(self._ptr):
try:
bfd_section = BfdSection(self._ptr, section)
self._sections[bfd_section.name] = bfd_section
except BfdSectionException, err:
#print "Exception during section pasing : %s" % err
pass |
def smart_search_pool(self, auth, query_str, search_options=None, extra_query=None):
        """ Perform a smart search on pool list.
        * `auth` [BaseAuth]
            AAA options.
        * `query_str` [string]
            Search string
        * `search_options` [options_dict]
            Search options. See :func:`search_pool`.
        * `extra_query` [dict_to_sql]
            Extra search terms, will be AND:ed together with what is
            extracted from the query string.
        Return a dict with three elements:
        * :attr:`interpretation` - How the query string was interpreted.
        * :attr:`search_options` - Various search_options.
        * :attr:`result` - The search result.
        The :attr:`interpretation` is given as a list of dicts, each
        explaining how a part of the search key was interpreted (ie. what
        pool attribute the search operation was performed on).
        The :attr:`result` is a list of dicts containing the search result.
        The smart search function tries to convert the query from a text
        string to a `query` dict which is passed to the
        :func:`search_pool` function. If multiple search keys are
        detected, they are combined with a logical AND.
        It will basically just take each search term and try to match it
        against the name or description column with regex match.
        See the :func:`search_pool` function for an explanation of the
        `search_options` argument.
        This is the documentation of the internal backend function. It's
        exposed over XML-RPC, please also see the XML-RPC documentation for
        :py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_pool` for full
        understanding.
        """
        if search_options is None:
            search_options = {}
        self._logger.debug("smart_search_pool query string: %s" % query_str)
        success, query = self._parse_pool_query(query_str)
        # On parse failure, return an error result rather than raising, so
        # XML-RPC clients get a structured response.
        if not success:
            return {
                'interpretation': query,
                'search_options': search_options,
                'result': [],
                'error': True,
                'error_message': 'query interpretation failed'
            }
        # AND the parsed free-text query with any caller-supplied extra terms.
        if extra_query is not None:
            query = {
                'operator': 'and',
                'val1': query,
                'val2': extra_query
            }
        # NOTE(review): unicode() is Python 2 only -- confirm before porting
        # this module to Python 3.
        self._logger.debug("smart_search_pool; query expanded to: %s" % unicode(query))
        search_result = self.search_pool(auth, query, search_options)
        # Annotate the result with how the query was interpreted.
        search_result['interpretation'] = query
        search_result['error'] = False
        return search_result
* `auth` [BaseAuth]
AAA options.
* `query_str` [string]
Search string
* `search_options` [options_dict]
Search options. See :func:`search_pool`.
* `extra_query` [dict_to_sql]
Extra search terms, will be AND:ed together with what is
extracted from the query string.
Return a dict with three elements:
* :attr:`interpretation` - How the query string was interpreted.
* :attr:`search_options` - Various search_options.
* :attr:`result` - The search result.
The :attr:`interpretation` is given as a list of dicts, each
explaining how a part of the search key was interpreted (ie. what
pool attribute the search operation was performed on).
The :attr:`result` is a list of dicts containing the search result.
The smart search function tries to convert the query from a text
string to a `query` dict which is passed to the
:func:`search_pool` function. If multiple search keys are
detected, they are combined with a logical AND.
It will basically just take each search term and try to match it
against the name or description column with regex match.
See the :func:`search_pool` function for an explanation of the
`search_options` argument.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_pool` for full
        understanding. | Below is the instruction that describes the task:
### Input:
Perform a smart search on pool list.
* `auth` [BaseAuth]
AAA options.
* `query_str` [string]
Search string
* `search_options` [options_dict]
Search options. See :func:`search_pool`.
* `extra_query` [dict_to_sql]
Extra search terms, will be AND:ed together with what is
extracted from the query string.
Return a dict with three elements:
* :attr:`interpretation` - How the query string was interpreted.
* :attr:`search_options` - Various search_options.
* :attr:`result` - The search result.
The :attr:`interpretation` is given as a list of dicts, each
explaining how a part of the search key was interpreted (ie. what
pool attribute the search operation was performed on).
The :attr:`result` is a list of dicts containing the search result.
The smart search function tries to convert the query from a text
string to a `query` dict which is passed to the
:func:`search_pool` function. If multiple search keys are
detected, they are combined with a logical AND.
It will basically just take each search term and try to match it
against the name or description column with regex match.
See the :func:`search_pool` function for an explanation of the
`search_options` argument.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_pool` for full
understanding.
### Response:
def smart_search_pool(self, auth, query_str, search_options=None, extra_query=None):
""" Perform a smart search on pool list.
* `auth` [BaseAuth]
AAA options.
* `query_str` [string]
Search string
* `search_options` [options_dict]
Search options. See :func:`search_pool`.
* `extra_query` [dict_to_sql]
Extra search terms, will be AND:ed together with what is
extracted from the query string.
Return a dict with three elements:
* :attr:`interpretation` - How the query string was interpreted.
* :attr:`search_options` - Various search_options.
* :attr:`result` - The search result.
The :attr:`interpretation` is given as a list of dicts, each
explaining how a part of the search key was interpreted (ie. what
pool attribute the search operation was performed on).
The :attr:`result` is a list of dicts containing the search result.
The smart search function tries to convert the query from a text
string to a `query` dict which is passed to the
:func:`search_pool` function. If multiple search keys are
detected, they are combined with a logical AND.
It will basically just take each search term and try to match it
against the name or description column with regex match.
See the :func:`search_pool` function for an explanation of the
`search_options` argument.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_pool` for full
understanding.
"""
if search_options is None:
search_options = {}
self._logger.debug("smart_search_pool query string: %s" % query_str)
success, query = self._parse_pool_query(query_str)
if not success:
return {
'interpretation': query,
'search_options': search_options,
'result': [],
'error': True,
'error_message': 'query interpretation failed'
}
if extra_query is not None:
query = {
'operator': 'and',
'val1': query,
'val2': extra_query
}
self._logger.debug("smart_search_pool; query expanded to: %s" % unicode(query))
search_result = self.search_pool(auth, query, search_options)
search_result['interpretation'] = query
search_result['error'] = False
return search_result |
def _closest_date(target_dt, date_list, before_target=None):
"""
This function finds the date in a list closest to the target date.
An optional parameter can be given to get the closest before or after.
:param target_dt: The target date
:type target_dt: datetime.date
:param date_list: The list of dates to search
:type date_list: list[datetime.date]
:param before_target: closest before or after the target
:type before_target: bool or None
:returns: The closest date
:rtype: datetime.date or None
"""
fb = lambda d: target_dt - d if d <= target_dt else datetime.timedelta.max
fa = lambda d: d - target_dt if d >= target_dt else datetime.timedelta.max
fnone = lambda d: target_dt - d if d < target_dt else d - target_dt
if before_target is None:
return min(date_list, key=fnone).date()
if before_target:
return min(date_list, key=fb).date()
else:
return min(date_list, key=fa).date() | This function finds the date in a list closest to the target date.
An optional parameter can be given to get the closest before or after.
:param target_dt: The target date
:type target_dt: datetime.date
:param date_list: The list of dates to search
:type date_list: list[datetime.date]
:param before_target: closest before or after the target
:type before_target: bool or None
:returns: The closest date
:rtype: datetime.date or None | Below is the the instruction that describes the task:
### Input:
This function finds the date in a list closest to the target date.
An optional parameter can be given to get the closest before or after.
:param target_dt: The target date
:type target_dt: datetime.date
:param date_list: The list of dates to search
:type date_list: list[datetime.date]
:param before_target: closest before or after the target
:type before_target: bool or None
:returns: The closest date
:rtype: datetime.date or None
### Response:
def _closest_date(target_dt, date_list, before_target=None):
"""
This function finds the date in a list closest to the target date.
An optional parameter can be given to get the closest before or after.
:param target_dt: The target date
:type target_dt: datetime.date
:param date_list: The list of dates to search
:type date_list: list[datetime.date]
:param before_target: closest before or after the target
:type before_target: bool or None
:returns: The closest date
:rtype: datetime.date or None
"""
fb = lambda d: target_dt - d if d <= target_dt else datetime.timedelta.max
fa = lambda d: d - target_dt if d >= target_dt else datetime.timedelta.max
fnone = lambda d: target_dt - d if d < target_dt else d - target_dt
if before_target is None:
return min(date_list, key=fnone).date()
if before_target:
return min(date_list, key=fb).date()
else:
return min(date_list, key=fa).date() |
def _return_feature(self, feature_type, feature_name, new_feature_name=...):
""" Helping function of `get_features`
"""
if self.new_names:
return feature_type, feature_name, (self.rename_function(feature_name) if new_feature_name is ... else
new_feature_name)
return feature_type, feature_name | Helping function of `get_features` | Below is the the instruction that describes the task:
### Input:
Helping function of `get_features`
### Response:
def _return_feature(self, feature_type, feature_name, new_feature_name=...):
""" Helping function of `get_features`
"""
if self.new_names:
return feature_type, feature_name, (self.rename_function(feature_name) if new_feature_name is ... else
new_feature_name)
return feature_type, feature_name |
def exception_to_unicode(e, traceback=False):
"""Convert an `Exception` to an `unicode` object.
In addition to `to_unicode`, this representation of the exception
also contains the class name and optionally the traceback.
"""
message = '%s: %s' % (e.__class__.__name__, to_unicode(e))
if traceback:
from docido_sdk.toolbox import get_last_traceback
traceback_only = get_last_traceback().split('\n')[:-2]
message = '\n%s\n%s' % (to_unicode('\n'.join(traceback_only)), message)
return message | Convert an `Exception` to an `unicode` object.
In addition to `to_unicode`, this representation of the exception
also contains the class name and optionally the traceback. | Below is the the instruction that describes the task:
### Input:
Convert an `Exception` to an `unicode` object.
In addition to `to_unicode`, this representation of the exception
also contains the class name and optionally the traceback.
### Response:
def exception_to_unicode(e, traceback=False):
"""Convert an `Exception` to an `unicode` object.
In addition to `to_unicode`, this representation of the exception
also contains the class name and optionally the traceback.
"""
message = '%s: %s' % (e.__class__.__name__, to_unicode(e))
if traceback:
from docido_sdk.toolbox import get_last_traceback
traceback_only = get_last_traceback().split('\n')[:-2]
message = '\n%s\n%s' % (to_unicode('\n'.join(traceback_only)), message)
return message |
def pairs(args):
"""
%prog pairs pairsfile <fastbfile|fastqfile>
Parse ALLPATHS pairs file, and write pairs IDs and single read IDs in
respective ids files: e.g. `lib1.pairs.fastq`, `lib2.pairs.fastq`,
and single `frags.fastq` (with single reads from lib1/2).
"""
from jcvi.assembly.preprocess import run_FastbAndQualb2Fastq
p = OptionParser(pairs.__doc__)
p.add_option("--header", default=False, action="store_true",
help="Print header only [default: %default]")
p.add_option("--suffix", default=False, action="store_true",
help="Add suffix /1, /2 to read names")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pairsfile, fastqfile = args
pf = op.basename(fastqfile).split(".")[0]
p = PairsFile(pairsfile)
print(p.header, file=sys.stderr)
if opts.header:
return
if fastqfile.endswith(".fastb"):
fastbfile = fastqfile
fastqfile = fastbfile.replace(".fastb", ".fastq")
run_FastbAndQualb2Fastq(infile=fastbfile, outfile=fastqfile)
p1file = "{0}.1.corr.fastq"
p2file = "{0}.2.corr.fastq"
fragsfile = "{0}.corr.fastq"
p1fw = [open(p1file.format(x), "w") for x in p.libnames]
p2fw = [open(p2file.format(x), "w") for x in p.libnames]
fragsfw = open(fragsfile.format(pf), "w")
extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=opts.suffix) | %prog pairs pairsfile <fastbfile|fastqfile>
Parse ALLPATHS pairs file, and write pairs IDs and single read IDs in
respective ids files: e.g. `lib1.pairs.fastq`, `lib2.pairs.fastq`,
and single `frags.fastq` (with single reads from lib1/2). | Below is the the instruction that describes the task:
### Input:
%prog pairs pairsfile <fastbfile|fastqfile>
Parse ALLPATHS pairs file, and write pairs IDs and single read IDs in
respective ids files: e.g. `lib1.pairs.fastq`, `lib2.pairs.fastq`,
and single `frags.fastq` (with single reads from lib1/2).
### Response:
def pairs(args):
"""
%prog pairs pairsfile <fastbfile|fastqfile>
Parse ALLPATHS pairs file, and write pairs IDs and single read IDs in
respective ids files: e.g. `lib1.pairs.fastq`, `lib2.pairs.fastq`,
and single `frags.fastq` (with single reads from lib1/2).
"""
from jcvi.assembly.preprocess import run_FastbAndQualb2Fastq
p = OptionParser(pairs.__doc__)
p.add_option("--header", default=False, action="store_true",
help="Print header only [default: %default]")
p.add_option("--suffix", default=False, action="store_true",
help="Add suffix /1, /2 to read names")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pairsfile, fastqfile = args
pf = op.basename(fastqfile).split(".")[0]
p = PairsFile(pairsfile)
print(p.header, file=sys.stderr)
if opts.header:
return
if fastqfile.endswith(".fastb"):
fastbfile = fastqfile
fastqfile = fastbfile.replace(".fastb", ".fastq")
run_FastbAndQualb2Fastq(infile=fastbfile, outfile=fastqfile)
p1file = "{0}.1.corr.fastq"
p2file = "{0}.2.corr.fastq"
fragsfile = "{0}.corr.fastq"
p1fw = [open(p1file.format(x), "w") for x in p.libnames]
p2fw = [open(p2file.format(x), "w") for x in p.libnames]
fragsfw = open(fragsfile.format(pf), "w")
extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=opts.suffix) |
def dereference_resource_descriptor(descriptor, base_path, base_descriptor=None):
"""Dereference resource descriptor (IN-PLACE FOR NOW).
"""
PROPERTIES = ['schema', 'dialect']
if base_descriptor is None:
base_descriptor = descriptor
for property in PROPERTIES:
value = descriptor.get(property)
# URI -> No
if not isinstance(value, six.string_types):
continue
# URI -> Pointer
if value.startswith('#'):
try:
pointer = jsonpointer.JsonPointer(value[1:])
descriptor[property] = pointer.resolve(base_descriptor)
except Exception as error:
message = 'Not resolved Pointer URI "%s" for resource.%s' % (value, property)
six.raise_from(
exceptions.DataPackageException(message),
error
)
# URI -> Remote
elif value.startswith('http'):
try:
response = requests.get(value)
response.raise_for_status()
descriptor[property] = response.json()
except Exception as error:
message = 'Not resolved Remote URI "%s" for resource.%s' % (value, property)
six.raise_from(
exceptions.DataPackageException(message),
error
)
# URI -> Local
else:
if not is_safe_path(value):
raise exceptions.DataPackageException(
'Not safe path in Local URI "%s" '
'for resource.%s' % (value, property))
if not base_path:
raise exceptions.DataPackageException(
'Local URI "%s" requires base path '
'for resource.%s' % (value, property))
fullpath = os.path.join(base_path, value)
try:
with io.open(fullpath, encoding='utf-8') as file:
descriptor[property] = json.load(file)
except Exception as error:
message = 'Not resolved Local URI "%s" for resource.%s' % (value, property)
six.raise_from(
exceptions.DataPackageException(message),
error
)
return descriptor | Dereference resource descriptor (IN-PLACE FOR NOW). | Below is the the instruction that describes the task:
### Input:
Dereference resource descriptor (IN-PLACE FOR NOW).
### Response:
def dereference_resource_descriptor(descriptor, base_path, base_descriptor=None):
"""Dereference resource descriptor (IN-PLACE FOR NOW).
"""
PROPERTIES = ['schema', 'dialect']
if base_descriptor is None:
base_descriptor = descriptor
for property in PROPERTIES:
value = descriptor.get(property)
# URI -> No
if not isinstance(value, six.string_types):
continue
# URI -> Pointer
if value.startswith('#'):
try:
pointer = jsonpointer.JsonPointer(value[1:])
descriptor[property] = pointer.resolve(base_descriptor)
except Exception as error:
message = 'Not resolved Pointer URI "%s" for resource.%s' % (value, property)
six.raise_from(
exceptions.DataPackageException(message),
error
)
# URI -> Remote
elif value.startswith('http'):
try:
response = requests.get(value)
response.raise_for_status()
descriptor[property] = response.json()
except Exception as error:
message = 'Not resolved Remote URI "%s" for resource.%s' % (value, property)
six.raise_from(
exceptions.DataPackageException(message),
error
)
# URI -> Local
else:
if not is_safe_path(value):
raise exceptions.DataPackageException(
'Not safe path in Local URI "%s" '
'for resource.%s' % (value, property))
if not base_path:
raise exceptions.DataPackageException(
'Local URI "%s" requires base path '
'for resource.%s' % (value, property))
fullpath = os.path.join(base_path, value)
try:
with io.open(fullpath, encoding='utf-8') as file:
descriptor[property] = json.load(file)
except Exception as error:
message = 'Not resolved Local URI "%s" for resource.%s' % (value, property)
six.raise_from(
exceptions.DataPackageException(message),
error
)
return descriptor |
async def get_advanced_settings(request: web.Request) -> web.Response:
"""
Handles a GET request and returns a json body with the key "settings" and a
value that is a list of objects where each object has keys "id", "title",
"description", and "value"
"""
res = _get_adv_settings()
return web.json_response(res) | Handles a GET request and returns a json body with the key "settings" and a
value that is a list of objects where each object has keys "id", "title",
"description", and "value" | Below is the the instruction that describes the task:
### Input:
Handles a GET request and returns a json body with the key "settings" and a
value that is a list of objects where each object has keys "id", "title",
"description", and "value"
### Response:
async def get_advanced_settings(request: web.Request) -> web.Response:
"""
Handles a GET request and returns a json body with the key "settings" and a
value that is a list of objects where each object has keys "id", "title",
"description", and "value"
"""
res = _get_adv_settings()
return web.json_response(res) |
def read(self, offset, length, fh):
"""
Read data.
Parameters
----------
offset : int
Read offset.
length : int
Length of data.
fh : int
File descriptor, ignored.
"""
self.data.seek(offset)
return self.data.read(length) | Read data.
Parameters
----------
offset : int
Read offset.
length : int
Length of data.
fh : int
File descriptor, ignored. | Below is the the instruction that describes the task:
### Input:
Read data.
Parameters
----------
offset : int
Read offset.
length : int
Length of data.
fh : int
File descriptor, ignored.
### Response:
def read(self, offset, length, fh):
"""
Read data.
Parameters
----------
offset : int
Read offset.
length : int
Length of data.
fh : int
File descriptor, ignored.
"""
self.data.seek(offset)
return self.data.read(length) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.