code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_data_raw(self, request):
"""The method is getting data by raw request"""
path = '/api/1.0/data/raw/'
res = self._api_post(definition.RawDataResponse, path, request)
token = res.continuation_token
while token is not None:
res2 = self.get_data_raw_with_token(token)
res.series += res2.series
token = res2.continuation_token
    return res | The method is getting data by raw request | Below is the instruction that describes the task:
### Input:
The method is getting data by raw request
### Response:
def get_data_raw(self, request):
    """Fetch raw data for the given request, following continuation tokens.

    Issues the initial POST to the raw-data endpoint, then keeps fetching
    follow-up pages and concatenating their series onto the first response
    until no continuation token remains.
    """
    path = '/api/1.0/data/raw/'
    response = self._api_post(definition.RawDataResponse, path, request)
    continuation = response.continuation_token
    while continuation is not None:
        next_page = self.get_data_raw_with_token(continuation)
        response.series += next_page.series
        continuation = next_page.continuation_token
    return response
def render_json(tree, indent):
"""Converts the tree into a flat json representation.
The json repr will be a list of hashes, each hash having 2 fields:
- package
- dependencies: list of dependencies
:param dict tree: dependency tree
:param int indent: no. of spaces to indent json
:returns: json representation of the tree
:rtype: str
"""
return json.dumps([{'package': k.as_dict(),
'dependencies': [v.as_dict() for v in vs]}
for k, vs in tree.items()],
indent=indent) | Converts the tree into a flat json representation.
The json repr will be a list of hashes, each hash having 2 fields:
- package
- dependencies: list of dependencies
:param dict tree: dependency tree
:param int indent: no. of spaces to indent json
:returns: json representation of the tree
    :rtype: str | Below is the instruction that describes the task:
### Input:
Converts the tree into a flat json representation.
The json repr will be a list of hashes, each hash having 2 fields:
- package
- dependencies: list of dependencies
:param dict tree: dependency tree
:param int indent: no. of spaces to indent json
:returns: json representation of the tree
:rtype: str
### Response:
def render_json(tree, indent):
    """Serialize the dependency tree into a flat JSON string.

    Each entry of the resulting JSON list is an object with two fields:
    ``package`` (the package rendered as a dict) and ``dependencies``
    (its dependencies rendered as dicts).

    :param dict tree: dependency tree
    :param int indent: no. of spaces to indent json
    :returns: json representation of the tree
    :rtype: str
    """
    entries = []
    for package, deps in tree.items():
        entries.append({
            'package': package.as_dict(),
            'dependencies': [dep.as_dict() for dep in deps],
        })
    return json.dumps(entries, indent=indent)
def set_choice_order(self, choice_ids, inline_region):
""" reorder choices per the passed in list
:param choice_ids:
:return:
"""
reordered_choices = []
current_choice_ids = [c['id'] for c in self.my_osid_object_form._my_map['choices'][inline_region]]
if set(choice_ids) != set(current_choice_ids):
raise IllegalState('missing choices for choice order')
for choice_id in choice_ids:
for current_choice in self.my_osid_object_form._my_map['choices'][inline_region]:
if choice_id == current_choice['id']:
reordered_choices.append(current_choice)
break
self.my_osid_object_form._my_map['choices'][inline_region] = reordered_choices | reorder choices per the passed in list
:param choice_ids:
    :return: | Below is the instruction that describes the task:
### Input:
reorder choices per the passed in list
:param choice_ids:
:return:
### Response:
def set_choice_order(self, choice_ids, inline_region):
    """Reorder the choices of a region to match the given id order.

    :param choice_ids: choice ids in the desired order; must cover exactly
        the ids currently present in the region
    :param inline_region: region whose choices are reordered
    :raises IllegalState: if ``choice_ids`` does not match the current ids
    :return: None
    """
    current = self.my_osid_object_form._my_map['choices'][inline_region]
    if set(choice_ids) != {choice['id'] for choice in current}:
        raise IllegalState('missing choices for choice order')
    in_requested_order = []
    for wanted_id in choice_ids:
        for choice in current:
            if choice['id'] == wanted_id:
                in_requested_order.append(choice)
                break
    self.my_osid_object_form._my_map['choices'][inline_region] = in_requested_order
def get(self, call_sid):
"""
Constructs a MemberContext
:param call_sid: The Call SID of the resource(s) to fetch
:returns: twilio.rest.api.v2010.account.queue.member.MemberContext
:rtype: twilio.rest.api.v2010.account.queue.member.MemberContext
"""
return MemberContext(
self._version,
account_sid=self._solution['account_sid'],
queue_sid=self._solution['queue_sid'],
call_sid=call_sid,
) | Constructs a MemberContext
:param call_sid: The Call SID of the resource(s) to fetch
:returns: twilio.rest.api.v2010.account.queue.member.MemberContext
    :rtype: twilio.rest.api.v2010.account.queue.member.MemberContext | Below is the instruction that describes the task:
### Input:
Constructs a MemberContext
:param call_sid: The Call SID of the resource(s) to fetch
:returns: twilio.rest.api.v2010.account.queue.member.MemberContext
:rtype: twilio.rest.api.v2010.account.queue.member.MemberContext
### Response:
def get(self, call_sid):
    """
    Constructs a MemberContext

    :param call_sid: The Call SID of the resource(s) to fetch

    :returns: twilio.rest.api.v2010.account.queue.member.MemberContext
    :rtype: twilio.rest.api.v2010.account.queue.member.MemberContext
    """
    # Reuse the account/queue identifiers resolved for this list resource.
    solution = self._solution
    return MemberContext(
        self._version,
        account_sid=solution['account_sid'],
        queue_sid=solution['queue_sid'],
        call_sid=call_sid,
    )
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n using the closest relative
neighbor distance-based method with VIRE atomic/ionic radii.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
and its weight.
"""
vire = ValenceIonicRadiusEvaluator(structure)
site = vire.structure[n]
neighs_dists = vire.structure.get_neighbors(site, self.cutoff)
rn = vire.radii[vire.structure[n].species_string]
reldists_neighs = []
for neigh, dist in neighs_dists:
reldists_neighs.append([dist / (
vire.radii[neigh.species_string] + rn), neigh])
siw = []
min_reldist = min([reldist for reldist, neigh in reldists_neighs])
for reldist, s in reldists_neighs:
if reldist < (1.0 + self.tol) * min_reldist:
w = min_reldist / reldist
siw.append({'site': s,
'image': self._get_image(vire.structure, s),
'weight': w,
'site_index': self._get_original_site(
vire.structure, s)})
return siw | Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n using the closest relative
neighbor distance-based method with VIRE atomic/ionic radii.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
        and its weight. | Below is the instruction that describes the task:
### Input:
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n using the closest relative
neighbor distance-based method with VIRE atomic/ionic radii.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
and its weight.
### Response:
def get_nn_info(self, structure, n):
    """
    Get all near-neighbor sites as well as the associated image locations
    and weights of the site with index n using the closest relative
    neighbor distance-based method with VIRE atomic/ionic radii.

    Args:
        structure (Structure): input structure.
        n (integer): index of site for which to determine near
            neighbors.

    Returns:
        siw (list of dicts): each dict describes a neighbor site with its
        image location, weight, and original site index.
    """
    vire = ValenceIonicRadiusEvaluator(structure)
    center = vire.structure[n]
    center_radius = vire.radii[vire.structure[n].species_string]

    # Scale each neighbor distance by the sum of the two ionic radii so
    # that the comparison is size-independent.
    scaled = [
        (dist / (vire.radii[neigh.species_string] + center_radius), neigh)
        for neigh, dist in vire.structure.get_neighbors(center, self.cutoff)
    ]

    closest = min(reldist for reldist, _ in scaled)
    siw = []
    for reldist, neigh in scaled:
        # Keep every neighbor within the tolerance band of the closest one.
        if reldist < (1.0 + self.tol) * closest:
            siw.append({
                'site': neigh,
                'image': self._get_image(vire.structure, neigh),
                'weight': closest / reldist,
                'site_index': self._get_original_site(vire.structure, neigh),
            })
    return siw
def unapi(request):
"""
This view implements unAPI 1.0 (see http://unapi.info).
"""
id = request.GET.get('id')
format = request.GET.get('format')
if format is not None:
try:
publications = Publication.objects.filter(pk=int(id))
if not publications:
raise ValueError
except ValueError:
# invalid id
return HttpResponse('\n'.join([
'<?xml version="1.0" encoding="UTF-8"?>',
'<error>Invalid ID.</error>']),
content_type="application/xml",
status=404)
if format == 'bibtex':
# return BibTex encoded publication
return render(request, 'publications/publication.bib', {
'publication': publications[0]
},
content_type='text/x-bibtex; charset=UTF-8')
if format == 'mods':
# return MODS encoded publication
return render(request, 'publications/publications.mods', {
'publications': publications
},
content_type='application/xml; charset=UTF-8')
if format == 'ris':
# return MODS encoded publication
return render(request, 'publications/publications.ris', {
'publications': publications
},
content_type='application/x-research-info-systems; charset=UTF-8')
# invalid format
return HttpResponse('\n'.join([
'<?xml version="1.0" encoding="UTF-8"?>',
'<error>Invalid format.</error>']),
content_type="application/xml",
status=406)
if id is not None:
return HttpResponse('\n'.join([
'<?xml version="1.0" encoding="UTF-8"?>',
'<formats id="{0}">'.format(id),
'<format name="bibtex" type="text/x-bibtex" />',
'<format name="ris" type="application/x-research-info-systems" />',
'<format name="mods" type="application/xml" />',
'</formats>']), content_type="application/xml")
return HttpResponse('\n'.join([
'<?xml version="1.0" encoding="UTF-8"?>',
'<formats>',
'<format name="bibtex" type="text/x-bibtex" />',
'<format name="ris" type="application/x-research-info-systems" />',
'<format name="mods" type="application/xml" />',
        '</formats>']), content_type="application/xml") | This view implements unAPI 1.0 (see http://unapi.info). | Below is the instruction that describes the task:
### Input:
This view implements unAPI 1.0 (see http://unapi.info).
### Response:
def unapi(request):
    """
    This view implements unAPI 1.0 (see http://unapi.info).

    With no parameters the view lists the supported formats; with only
    ``id`` it lists the formats available for that record; with ``id``
    and ``format`` it returns the record rendered in that format
    (BibTex, MODS or RIS).
    """
    id = request.GET.get('id')
    format = request.GET.get('format')
    if format is not None:
        try:
            # int(id) raises TypeError when id is missing (None) and
            # ValueError when it is non-numeric; both mean "invalid id"
            # and must produce a 404 rather than an unhandled 500.
            publications = Publication.objects.filter(pk=int(id))
            if not publications:
                raise ValueError
        except (TypeError, ValueError):
            # invalid id
            return HttpResponse('\n'.join([
                '<?xml version="1.0" encoding="UTF-8"?>',
                '<error>Invalid ID.</error>']),
                content_type="application/xml",
                status=404)
        if format == 'bibtex':
            # return BibTex encoded publication
            return render(request, 'publications/publication.bib', {
                'publication': publications[0]
            },
                content_type='text/x-bibtex; charset=UTF-8')
        if format == 'mods':
            # return MODS encoded publication
            return render(request, 'publications/publications.mods', {
                'publications': publications
            },
                content_type='application/xml; charset=UTF-8')
        if format == 'ris':
            # return RIS encoded publication
            return render(request, 'publications/publications.ris', {
                'publications': publications
            },
                content_type='application/x-research-info-systems; charset=UTF-8')
        # invalid format
        return HttpResponse('\n'.join([
            '<?xml version="1.0" encoding="UTF-8"?>',
            '<error>Invalid format.</error>']),
            content_type="application/xml",
            status=406)
    if id is not None:
        return HttpResponse('\n'.join([
            '<?xml version="1.0" encoding="UTF-8"?>',
            '<formats id="{0}">'.format(id),
            '<format name="bibtex" type="text/x-bibtex" />',
            '<format name="ris" type="application/x-research-info-systems" />',
            '<format name="mods" type="application/xml" />',
            '</formats>']), content_type="application/xml")
    return HttpResponse('\n'.join([
        '<?xml version="1.0" encoding="UTF-8"?>',
        '<formats>',
        '<format name="bibtex" type="text/x-bibtex" />',
        '<format name="ris" type="application/x-research-info-systems" />',
        '<format name="mods" type="application/xml" />',
        '</formats>']), content_type="application/xml")
def index_model(index_name, adapter):
''' Indel all objects given a model'''
model = adapter.model
log.info('Indexing {0} objects'.format(model.__name__))
qs = model.objects
if hasattr(model.objects, 'visible'):
qs = qs.visible()
if adapter.exclude_fields:
qs = qs.exclude(*adapter.exclude_fields)
docs = iter_qs(qs, adapter)
docs = iter_for_index(docs, index_name)
for ok, info in streaming_bulk(es.client, docs, raise_on_error=False):
if not ok:
log.error('Unable to index %s "%s": %s', model.__name__,
                      info['index']['_id'], info['index']['error']) | Index all objects given a model | Below is the instruction that describes the task:
### Input:
Index all objects given a model
### Response:
def index_model(index_name, adapter):
    '''Index all objects of the adapter's model into the given index.'''
    model = adapter.model
    log.info('Indexing {0} objects'.format(model.__name__))
    qs = model.objects
    # Restrict to visible objects when the manager supports it.
    if hasattr(model.objects, 'visible'):
        qs = qs.visible()
    # Skip fields the adapter explicitly excludes from indexing.
    if adapter.exclude_fields:
        qs = qs.exclude(*adapter.exclude_fields)
    # Build the document stream and tag each document with the target index.
    docs = iter_qs(qs, adapter)
    docs = iter_for_index(docs, index_name)
    # Bulk-index without aborting on individual failures; log each error.
    for ok, info in streaming_bulk(es.client, docs, raise_on_error=False):
        if not ok:
            log.error('Unable to index %s "%s": %s', model.__name__,
                      info['index']['_id'], info['index']['error'])
def calculate_local_order_parameter(self, oscillatory_network, start_iteration = None, stop_iteration = None):
"""!
@brief Calculates local order parameter.
@details Local order parameter or so-called level of local or partial synchronization is calculated by following expression:
\f[
r_{c}=\left | \sum_{i=0}^{N} \frac{1}{N_{i}} \sum_{j=0}e^{ \theta_{j} - \theta_{i} } \right |;
\f]
where N - total amount of oscillators in the network and \f$N_{i}\f$ - amount of neighbors of oscillator with index \f$i\f$.
@param[in] oscillatory_network (sync): Sync oscillatory network whose structure of connections is required for calculation.
@param[in] start_iteration (uint): The first iteration that is used for calculation, if 'None' then the last iteration is used.
@param[in] stop_iteration (uint): The last iteration that is used for calculation, if 'None' then 'start_iteration' + 1 is used.
@return (list) List of levels of local (partial) synchronization (local order parameter evolution).
"""
(start_iteration, stop_iteration) = self.__get_start_stop_iterations(start_iteration, stop_iteration);
if (self._ccore_sync_dynamic_pointer is not None):
network_pointer = oscillatory_network._ccore_network_pointer;
return wrapper.sync_dynamic_calculate_local_order(self._ccore_sync_dynamic_pointer, network_pointer, start_iteration, stop_iteration);
sequence_local_order = [];
for index in range(start_iteration, stop_iteration):
sequence_local_order.append(order_estimator.calculate_local_sync_order(self.output[index], oscillatory_network));
return sequence_local_order; | !
@brief Calculates local order parameter.
@details Local order parameter or so-called level of local or partial synchronization is calculated by following expression:
\f[
r_{c}=\left | \sum_{i=0}^{N} \frac{1}{N_{i}} \sum_{j=0}e^{ \theta_{j} - \theta_{i} } \right |;
\f]
where N - total amount of oscillators in the network and \f$N_{i}\f$ - amount of neighbors of oscillator with index \f$i\f$.
@param[in] oscillatory_network (sync): Sync oscillatory network whose structure of connections is required for calculation.
@param[in] start_iteration (uint): The first iteration that is used for calculation, if 'None' then the last iteration is used.
@param[in] stop_iteration (uint): The last iteration that is used for calculation, if 'None' then 'start_iteration' + 1 is used.
    @return (list) List of levels of local (partial) synchronization (local order parameter evolution). | Below is the instruction that describes the task:
### Input:
!
@brief Calculates local order parameter.
@details Local order parameter or so-called level of local or partial synchronization is calculated by following expression:
\f[
r_{c}=\left | \sum_{i=0}^{N} \frac{1}{N_{i}} \sum_{j=0}e^{ \theta_{j} - \theta_{i} } \right |;
\f]
where N - total amount of oscillators in the network and \f$N_{i}\f$ - amount of neighbors of oscillator with index \f$i\f$.
@param[in] oscillatory_network (sync): Sync oscillatory network whose structure of connections is required for calculation.
@param[in] start_iteration (uint): The first iteration that is used for calculation, if 'None' then the last iteration is used.
@param[in] stop_iteration (uint): The last iteration that is used for calculation, if 'None' then 'start_iteration' + 1 is used.
@return (list) List of levels of local (partial) synchronization (local order parameter evolution).
### Response:
def calculate_local_order_parameter(self, oscillatory_network, start_iteration=None, stop_iteration=None):
    """!
    @brief Calculates local order parameter.
    @details Local order parameter or so-called level of local or partial synchronization is calculated by following expression:
    \f[
    r_{c}=\left | \sum_{i=0}^{N} \frac{1}{N_{i}} \sum_{j=0}e^{ \theta_{j} - \theta_{i} } \right |;
    \f]
    where N - total amount of oscillators in the network and \f$N_{i}\f$ - amount of neighbors of oscillator with index \f$i\f$.

    @param[in] oscillatory_network (sync): Sync oscillatory network whose structure of connections is required for calculation.
    @param[in] start_iteration (uint): The first iteration that is used for calculation, if 'None' then the last iteration is used.
    @param[in] stop_iteration (uint): The last iteration that is used for calculation, if 'None' then 'start_iteration' + 1 is used.

    @return (list) List of levels of local (partial) synchronization (local order parameter evolution).
    """
    start_iteration, stop_iteration = self.__get_start_stop_iterations(start_iteration, stop_iteration)

    # Delegate to the C-core implementation when this dynamic is backed by it.
    if self._ccore_sync_dynamic_pointer is not None:
        network_pointer = oscillatory_network._ccore_network_pointer
        return wrapper.sync_dynamic_calculate_local_order(self._ccore_sync_dynamic_pointer, network_pointer, start_iteration, stop_iteration)

    return [
        order_estimator.calculate_local_sync_order(self.output[iteration], oscillatory_network)
        for iteration in range(start_iteration, stop_iteration)
    ]
def delete_blacklist_entry(self, blacklist_entry_id):
"""Delete an existing blacklist entry.
Keyword arguments:
blacklist_entry_id -- The unique identifier of the blacklist entry to delete.
"""
delete_blacklist_endpoint = Template("${rest_root}/blacklist/${public_key}/${blacklist_entry_id}/delete")
url = delete_blacklist_endpoint.substitute(rest_root=self._rest_root, public_key=self._public_key, blacklist_entry_id=blacklist_entry_id)
self.__post_request(url, {}) | Delete an existing blacklist entry.
Keyword arguments:
    blacklist_entry_id -- The unique identifier of the blacklist entry to delete. | Below is the instruction that describes the task:
### Input:
Delete an existing blacklist entry.
Keyword arguments:
blacklist_entry_id -- The unique identifier of the blacklist entry to delete.
### Response:
def delete_blacklist_entry(self, blacklist_entry_id):
    """Delete an existing blacklist entry.

    Keyword arguments:
    blacklist_entry_id -- The unique identifier of the blacklist entry to delete.
    """
    endpoint = Template("${rest_root}/blacklist/${public_key}/${blacklist_entry_id}/delete")
    url = endpoint.substitute(
        rest_root=self._rest_root,
        public_key=self._public_key,
        blacklist_entry_id=blacklist_entry_id,
    )
    self.__post_request(url, {})
def bota_gorda(game):
'''
Prefers to play dominoes with higher point values.
:param Game game: game to play
:return: None
'''
game.valid_moves = tuple(sorted(game.valid_moves, key=lambda m: -(m[0].first + m[0].second))) | Prefers to play dominoes with higher point values.
:param Game game: game to play
    :return: None | Below is the instruction that describes the task:
### Input:
Prefers to play dominoes with higher point values.
:param Game game: game to play
:return: None
### Response:
def bota_gorda(game):
    '''
    Prefers to play dominoes with higher point values.

    :param Game game: game to play
    :return: None
    '''
    def pip_total(move):
        domino = move[0]
        return domino.first + domino.second

    # reverse=True keeps the sort stable for equal totals, matching an
    # ascending sort on the negated total.
    game.valid_moves = tuple(sorted(game.valid_moves, key=pip_total, reverse=True))
def _format_char(char):
"""Prepares a single character for passing to ctypes calls, needs to return
an integer but can also pass None which will keep the current character
instead of overwriting it.
This is called often and needs to be optimized whenever possible.
"""
if char is None:
return -1
if isinstance(char, _STRTYPES) and len(char) == 1:
return ord(char)
try:
return int(char) # allow all int-like objects
except:
raise TypeError('char single character string, integer, or None\nReceived: ' + repr(char)) | Prepares a single character for passing to ctypes calls, needs to return
an integer but can also pass None which will keep the current character
instead of overwriting it.
    This is called often and needs to be optimized whenever possible. | Below is the instruction that describes the task:
### Input:
Prepares a single character for passing to ctypes calls, needs to return
an integer but can also pass None which will keep the current character
instead of overwriting it.
This is called often and needs to be optimized whenever possible.
### Response:
def _format_char(char):
    """Prepares a single character for passing to ctypes calls, needs to return
    an integer but can also pass None which will keep the current character
    instead of overwriting it.

    This is called often and needs to be optimized whenever possible.
    """
    if char is None:
        return -1
    if isinstance(char, _STRTYPES) and len(char) == 1:
        return ord(char)
    try:
        return int(char)  # allow all int-like objects
    except (TypeError, ValueError):
        # A bare `except:` here also swallowed KeyboardInterrupt/SystemExit;
        # only conversion failures should be reported as a TypeError.
        raise TypeError('char must be a single character string, an integer, '
                        'or None\nReceived: ' + repr(char))
def double_exponential_moving_average(data, period):
"""
Double Exponential Moving Average.
Formula:
DEMA = 2*EMA - EMA(EMA)
"""
catch_errors.check_for_period_error(data, period)
dema = (2 * ema(data, period)) - ema(ema(data, period), period)
return dema | Double Exponential Moving Average.
Formula:
    DEMA = 2*EMA - EMA(EMA) | Below is the instruction that describes the task:
### Input:
Double Exponential Moving Average.
Formula:
DEMA = 2*EMA - EMA(EMA)
### Response:
def double_exponential_moving_average(data, period):
    """
    Double Exponential Moving Average.

    Formula:
    DEMA = 2*EMA - EMA(EMA)

    :param data: price series to smooth
    :param period: look-back period used for both EMA passes
    :returns: the DEMA series
    :raises: period error via catch_errors when period is invalid for data
    """
    catch_errors.check_for_period_error(data, period)
    # Compute the inner EMA once instead of three separate ema() passes.
    ema_of_data = ema(data, period)
    return (2 * ema_of_data) - ema(ema_of_data, period)
def process_transaction(self, transaction):
"""Add a transaction to ledger, updating the current state as needed.
Parameters
----------
transaction : zp.Transaction
The transaction to execute.
"""
asset = transaction.asset
if isinstance(asset, Future):
try:
old_price = self._payout_last_sale_prices[asset]
except KeyError:
self._payout_last_sale_prices[asset] = transaction.price
else:
position = self.position_tracker.positions[asset]
amount = position.amount
price = transaction.price
self._cash_flow(
self._calculate_payout(
asset.price_multiplier,
amount,
old_price,
price,
),
)
if amount + transaction.amount == 0:
del self._payout_last_sale_prices[asset]
else:
self._payout_last_sale_prices[asset] = price
else:
self._cash_flow(-(transaction.price * transaction.amount))
self.position_tracker.execute_transaction(transaction)
# we only ever want the dict form from now on
transaction_dict = transaction.to_dict()
try:
self._processed_transactions[transaction.dt].append(
transaction_dict,
)
except KeyError:
self._processed_transactions[transaction.dt] = [transaction_dict] | Add a transaction to ledger, updating the current state as needed.
Parameters
----------
transaction : zp.Transaction
        The transaction to execute. | Below is the instruction that describes the task:
### Input:
Add a transaction to ledger, updating the current state as needed.
Parameters
----------
transaction : zp.Transaction
The transaction to execute.
### Response:
def process_transaction(self, transaction):
    """Add a transaction to ledger, updating the current state as needed.

    Parameters
    ----------
    transaction : zp.Transaction
        The transaction to execute.
    """
    asset = transaction.asset
    if isinstance(asset, Future):
        try:
            old_price = self._payout_last_sale_prices[asset]
        except KeyError:
            # First trade in this future: record the price so later trades
            # can compute the mark-to-market payout against it.
            self._payout_last_sale_prices[asset] = transaction.price
        else:
            position = self.position_tracker.positions[asset]
            amount = position.amount
            price = transaction.price
            # Realize the payout accrued between the previously recorded
            # price and this transaction's price on the existing amount.
            self._cash_flow(
                self._calculate_payout(
                    asset.price_multiplier,
                    amount,
                    old_price,
                    price,
                ),
            )
            if amount + transaction.amount == 0:
                # Position fully closed: stop tracking a last sale price.
                del self._payout_last_sale_prices[asset]
            else:
                self._payout_last_sale_prices[asset] = price
    else:
        # Non-futures settle cash directly: buys consume cash, sells add it.
        self._cash_flow(-(transaction.price * transaction.amount))
    self.position_tracker.execute_transaction(transaction)
    # we only ever want the dict form from now on
    transaction_dict = transaction.to_dict()
    try:
        self._processed_transactions[transaction.dt].append(
            transaction_dict,
        )
    except KeyError:
        self._processed_transactions[transaction.dt] = [transaction_dict]
def main(loader, name):
"""
Here we iterate through the datasets and score them with a classifier using different encodings.
"""
scores = []
raw_scores_ds = {}
# first get the dataset
X, y, mapping = loader()
clf = linear_model.LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=200, random_state=0)
# try each encoding method available, which works on multiclass problems
encoders = (set(category_encoders.__all__) - {'WOEEncoder'}) # WoE is currently only for binary targets
for encoder_name in encoders:
encoder = getattr(category_encoders, encoder_name)
start_time = time.time()
score, stds, raw_scores, dim = score_models(clf, X, y, encoder)
scores.append([encoder_name, name, dim, score, stds, time.time() - start_time])
raw_scores_ds[encoder_name] = raw_scores
gc.collect()
results = pd.DataFrame(scores, columns=['Encoding', 'Dataset', 'Dimensionality', 'Avg. Score', 'Score StDev', 'Elapsed Time'])
raw = pd.DataFrame.from_dict(raw_scores_ds)
ax = raw.plot(kind='box', return_type='axes')
plt.title('Scores for Encodings on %s Dataset' % (name,))
plt.ylabel('Score (higher is better)')
for tick in ax.get_xticklabels():
tick.set_rotation(90)
plt.grid()
plt.tight_layout()
plt.show()
    return results, raw | Here we iterate through the datasets and score them with a classifier using different encodings. | Below is the instruction that describes the task:
### Input:
Here we iterate through the datasets and score them with a classifier using different encodings.
### Response:
def main(loader, name):
    """
    Here we iterate through the datasets and score them with a classifier using different encodings.
    """
    # first get the dataset
    X, y, mapping = loader()

    clf = linear_model.LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=200, random_state=0)

    scores = []
    raw_scores_ds = {}

    # try each encoding method available, which works on multiclass problems
    encoders = (set(category_encoders.__all__) - {'WOEEncoder'})  # WoE is currently only for binary targets
    for encoder_name in encoders:
        encoder = getattr(category_encoders, encoder_name)
        start_time = time.time()
        score, stds, raw_scores, dim = score_models(clf, X, y, encoder)
        elapsed = time.time() - start_time
        scores.append([encoder_name, name, dim, score, stds, elapsed])
        raw_scores_ds[encoder_name] = raw_scores
        gc.collect()

    results = pd.DataFrame(scores, columns=['Encoding', 'Dataset', 'Dimensionality', 'Avg. Score', 'Score StDev', 'Elapsed Time'])

    # Box plot of the raw per-fold scores, one box per encoder.
    raw = pd.DataFrame.from_dict(raw_scores_ds)
    ax = raw.plot(kind='box', return_type='axes')
    plt.title('Scores for Encodings on %s Dataset' % (name,))
    plt.ylabel('Score (higher is better)')
    for tick in ax.get_xticklabels():
        tick.set_rotation(90)
    plt.grid()
    plt.tight_layout()
    plt.show()

    return results, raw
def _unscheduleAction(self):
"""
Unschedule current action
Note that it does not add record to action log and does not do
required steps to resume previous action. If you need this - use
_cancelScheduledAction
"""
logger.trace("{} unscheduling actions".format(self))
self.aqStash = deque()
self.scheduledAction = None | Unschedule current action
Note that it does not add record to action log and does not do
required steps to resume previous action. If you need this - use
    _cancelScheduledAction | Below is the instruction that describes the task:
### Input:
Unschedule current action
Note that it does not add record to action log and does not do
required steps to resume previous action. If you need this - use
_cancelScheduledAction
### Response:
def _unscheduleAction(self):
    """
    Unschedule current action

    Note that it does not add record to action log and does not do
    required steps to resume previous action. If you need this - use
    _cancelScheduledAction
    """
    logger.trace("{} unscheduling actions".format(self))
    self.scheduledAction = None
    # Drop any stashed actions along with the scheduled one.
    self.aqStash = deque()
def spin2y_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, xi2, phi_a, phi_s):
"""Returns y-component spin for secondary mass.
"""
chi_perp = chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2)
phi2 = phi2_from_phi_a_phi_s(phi_a, phi_s)
    return chi_perp * numpy.sin(phi2) | Returns y-component spin for secondary mass. | Below is the instruction that describes the task:
### Input:
Returns y-component spin for secondary mass.
### Response:
def spin2y_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, xi2, phi_a, phi_s):
    """Returns y-component spin for secondary mass.

    Recovers the perpendicular spin magnitude from (mass1, mass2, xi2)
    and projects it onto the y-axis using the secondary's azimuthal angle.
    """
    phi2 = phi2_from_phi_a_phi_s(phi_a, phi_s)
    magnitude = chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2)
    return magnitude * numpy.sin(phi2)
def _render_section(self, output, params, indent=0):
"""
It takes a dictionary and recurses through.
For key-value pair it checks whether the value is a dictionary
and prepends the key with &
It passes the valued to the same function, increasing the indentation
If the value is a list, I assume that this is something the user
wants to store repetitively
eg:
dict['KEY'] = ['val1', 'val2']
===>
KEY val1
KEY val2
or
dict['KIND'] = [{'_': 'Ba', 'ELEMENT':'Ba'},
{'_': 'Ti', 'ELEMENT':'Ti'},
{'_': 'O', 'ELEMENT':'O'}]
====>
&KIND Ba
ELEMENT Ba
&END KIND
&KIND Ti
ELEMENT Ti
&END KIND
&KIND O
ELEMENT O
&END KIND
"""
for key, val in sorted(params.items()):
if key.upper() != key:
raise InputValidationError("keyword '%s' not upper case" % key)
if key.startswith('@') or key.startswith('$'):
raise InputValidationError("CP2K preprocessor not supported")
if isinstance(val, dict):
output.append('%s&%s %s' % (' ' * indent, key, val.pop('_', '')))
self._render_section(output, val, indent + 3)
output.append('%s&END %s' % (' ' * indent, key))
elif isinstance(val, list):
for listitem in val:
self._render_section(output, {key: listitem}, indent)
elif isinstance(val, bool):
val_str = '.true.' if val else '.false.'
output.append('%s%s %s' % (' ' * indent, key, val_str))
else:
output.append('%s%s %s' % (' ' * indent, key, val)) | It takes a dictionary and recurses through.
For key-value pair it checks whether the value is a dictionary
and prepends the key with &
It passes the valued to the same function, increasing the indentation
If the value is a list, I assume that this is something the user
wants to store repetitively
eg:
dict['KEY'] = ['val1', 'val2']
===>
KEY val1
KEY val2
or
dict['KIND'] = [{'_': 'Ba', 'ELEMENT':'Ba'},
{'_': 'Ti', 'ELEMENT':'Ti'},
{'_': 'O', 'ELEMENT':'O'}]
====>
&KIND Ba
ELEMENT Ba
&END KIND
&KIND Ti
ELEMENT Ti
&END KIND
&KIND O
ELEMENT O
    &END KIND | Below is the instruction that describes the task:
### Input:
It takes a dictionary and recurses through.
For key-value pair it checks whether the value is a dictionary
and prepends the key with &
It passes the valued to the same function, increasing the indentation
If the value is a list, I assume that this is something the user
wants to store repetitively
eg:
dict['KEY'] = ['val1', 'val2']
===>
KEY val1
KEY val2
or
dict['KIND'] = [{'_': 'Ba', 'ELEMENT':'Ba'},
{'_': 'Ti', 'ELEMENT':'Ti'},
{'_': 'O', 'ELEMENT':'O'}]
====>
&KIND Ba
ELEMENT Ba
&END KIND
&KIND Ti
ELEMENT Ti
&END KIND
&KIND O
ELEMENT O
&END KIND
### Response:
def _render_section(self, output, params, indent=0):
"""
It takes a dictionary and recurses through.
For key-value pair it checks whether the value is a dictionary
and prepends the key with &
It passes the valued to the same function, increasing the indentation
If the value is a list, I assume that this is something the user
wants to store repetitively
eg:
dict['KEY'] = ['val1', 'val2']
===>
KEY val1
KEY val2
or
dict['KIND'] = [{'_': 'Ba', 'ELEMENT':'Ba'},
{'_': 'Ti', 'ELEMENT':'Ti'},
{'_': 'O', 'ELEMENT':'O'}]
====>
&KIND Ba
ELEMENT Ba
&END KIND
&KIND Ti
ELEMENT Ti
&END KIND
&KIND O
ELEMENT O
&END KIND
"""
for key, val in sorted(params.items()):
if key.upper() != key:
raise InputValidationError("keyword '%s' not upper case" % key)
if key.startswith('@') or key.startswith('$'):
raise InputValidationError("CP2K preprocessor not supported")
if isinstance(val, dict):
output.append('%s&%s %s' % (' ' * indent, key, val.pop('_', '')))
self._render_section(output, val, indent + 3)
output.append('%s&END %s' % (' ' * indent, key))
elif isinstance(val, list):
for listitem in val:
self._render_section(output, {key: listitem}, indent)
elif isinstance(val, bool):
val_str = '.true.' if val else '.false.'
output.append('%s%s %s' % (' ' * indent, key, val_str))
else:
output.append('%s%s %s' % (' ' * indent, key, val)) |
def depth_soil_conductivity(self, value=None):
"""Corresponds to IDD Field `depth_soil_conductivity`
Args:
value (float): value for IDD Field `depth_soil_conductivity`
Unit: W/m-K,
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_soil_conductivity`'.format(value))
self._depth_soil_conductivity = value | Corresponds to IDD Field `depth_soil_conductivity`
Args:
value (float): value for IDD Field `depth_soil_conductivity`
Unit: W/m-K,
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | Below is the the instruction that describes the task:
### Input:
Corresponds to IDD Field `depth_soil_conductivity`
Args:
value (float): value for IDD Field `depth_soil_conductivity`
Unit: W/m-K,
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
### Response:
def depth_soil_conductivity(self, value=None):
"""Corresponds to IDD Field `depth_soil_conductivity`
Args:
value (float): value for IDD Field `depth_soil_conductivity`
Unit: W/m-K,
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_soil_conductivity`'.format(value))
self._depth_soil_conductivity = value |
def render(self, filename):
"""Perform initialization of render, set quality and size video attributes and then call template method that
is defined in child class.
"""
self.elapsed_time = -time()
dpi = 100
fig = figure(figsize=(16, 9), dpi=dpi)
with self.writer.saving(fig, filename, dpi):
for frame_id in xrange(self.frames + 1):
self.renderFrame(frame_id)
self.writer.grab_frame()
self.elapsed_time += time() | Perform initialization of render, set quality and size video attributes and then call template method that
is defined in child class. | Below is the the instruction that describes the task:
### Input:
Perform initialization of render, set quality and size video attributes and then call template method that
is defined in child class.
### Response:
def render(self, filename):
"""Perform initialization of render, set quality and size video attributes and then call template method that
is defined in child class.
"""
self.elapsed_time = -time()
dpi = 100
fig = figure(figsize=(16, 9), dpi=dpi)
with self.writer.saving(fig, filename, dpi):
for frame_id in xrange(self.frames + 1):
self.renderFrame(frame_id)
self.writer.grab_frame()
self.elapsed_time += time() |
def NewFromJSON(data):
"""
Create a new Comment instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Comment.
Returns:
A Comment instance.
"""
return Comment(
body=data.get('body', None),
posted_at=data.get('posted_at', None),
user=User.NewFromJSON(data.get('user', None))
) | Create a new Comment instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Comment.
Returns:
A Comment instance. | Below is the the instruction that describes the task:
### Input:
Create a new Comment instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Comment.
Returns:
A Comment instance.
### Response:
def NewFromJSON(data):
"""
Create a new Comment instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Comment.
Returns:
A Comment instance.
"""
return Comment(
body=data.get('body', None),
posted_at=data.get('posted_at', None),
user=User.NewFromJSON(data.get('user', None))
) |
def magictype(text, prompt_template="default", speed=1):
"""Echo each character in ``text`` as keyboard characters are pressed.
Characters are echo'd ``speed`` characters at a time.
"""
echo_prompt(prompt_template)
cursor_position = 0
return_to_regular_type = False
with raw_mode():
while True:
char = text[cursor_position : cursor_position + speed]
in_char = getchar()
if in_char in {ESC, CTRLC}:
echo(carriage_return=True)
raise click.Abort()
elif in_char == TAB:
return_to_regular_type = True
break
elif in_char == BACKSPACE:
if cursor_position > 0:
echo("\b \b", nl=False)
cursor_position -= 1
elif in_char in RETURNS:
# Only return at end of command
if cursor_position >= len(text):
echo("\r", nl=True)
break
elif in_char == CTRLZ and hasattr(signal, "SIGTSTP"):
# Background process
os.kill(0, signal.SIGTSTP)
# When doitlive is back in foreground, clear the terminal
# and resume where we left off
click.clear()
echo_prompt(prompt_template)
echo(text[:cursor_position], nl=False)
else:
if cursor_position < len(text):
echo(char, nl=False)
increment = min([speed, len(text) - cursor_position])
cursor_position += increment
return return_to_regular_type | Echo each character in ``text`` as keyboard characters are pressed.
Characters are echo'd ``speed`` characters at a time. | Below is the the instruction that describes the task:
### Input:
Echo each character in ``text`` as keyboard characters are pressed.
Characters are echo'd ``speed`` characters at a time.
### Response:
def magictype(text, prompt_template="default", speed=1):
"""Echo each character in ``text`` as keyboard characters are pressed.
Characters are echo'd ``speed`` characters at a time.
"""
echo_prompt(prompt_template)
cursor_position = 0
return_to_regular_type = False
with raw_mode():
while True:
char = text[cursor_position : cursor_position + speed]
in_char = getchar()
if in_char in {ESC, CTRLC}:
echo(carriage_return=True)
raise click.Abort()
elif in_char == TAB:
return_to_regular_type = True
break
elif in_char == BACKSPACE:
if cursor_position > 0:
echo("\b \b", nl=False)
cursor_position -= 1
elif in_char in RETURNS:
# Only return at end of command
if cursor_position >= len(text):
echo("\r", nl=True)
break
elif in_char == CTRLZ and hasattr(signal, "SIGTSTP"):
# Background process
os.kill(0, signal.SIGTSTP)
# When doitlive is back in foreground, clear the terminal
# and resume where we left off
click.clear()
echo_prompt(prompt_template)
echo(text[:cursor_position], nl=False)
else:
if cursor_position < len(text):
echo(char, nl=False)
increment = min([speed, len(text) - cursor_position])
cursor_position += increment
return return_to_regular_type |
def until(name,
m_args=None,
m_kwargs=None,
condition=None,
period=0,
timeout=604800):
'''
Loop over an execution module until a condition is met.
name
The name of the execution module
m_args
The execution module's positional arguments
m_kwargs
The execution module's keyword arguments
condition
The condition which must be met for the loop to break. This
should contain ``m_ret`` which is the return from the execution
module.
period
The number of seconds to wait between executions
timeout
The timeout in seconds
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if name not in __salt__:
ret['comment'] = 'Cannot find module {0}'.format(name)
return ret
if condition is None:
ret['comment'] = 'An exit condition must be specified'
return ret
if not isinstance(period, int):
ret['comment'] = 'Period must be specified as an integer in seconds'
return ret
if not isinstance(timeout, int):
ret['comment'] = 'Timeout must be specified as an integer in seconds'
return ret
if __opts__['test']:
ret['comment'] = 'The execution module {0} will be run'.format(name)
ret['result'] = None
return ret
if not m_args:
m_args = []
if not m_kwargs:
m_kwargs = {}
def timed_out():
if time.time() >= timeout:
return True
return False
timeout = time.time() + timeout
while not timed_out():
m_ret = __salt__[name](*m_args, **m_kwargs)
if eval(condition): # pylint: disable=W0123
ret['result'] = True
ret['comment'] = 'Condition {0} was met'.format(condition)
return ret
time.sleep(period)
ret['comment'] = 'Timed out while waiting for condition {0}'.format(condition)
return ret | Loop over an execution module until a condition is met.
name
The name of the execution module
m_args
The execution module's positional arguments
m_kwargs
The execution module's keyword arguments
condition
The condition which must be met for the loop to break. This
should contain ``m_ret`` which is the return from the execution
module.
period
The number of seconds to wait between executions
timeout
The timeout in seconds | Below is the the instruction that describes the task:
### Input:
Loop over an execution module until a condition is met.
name
The name of the execution module
m_args
The execution module's positional arguments
m_kwargs
The execution module's keyword arguments
condition
The condition which must be met for the loop to break. This
should contain ``m_ret`` which is the return from the execution
module.
period
The number of seconds to wait between executions
timeout
The timeout in seconds
### Response:
def until(name,
m_args=None,
m_kwargs=None,
condition=None,
period=0,
timeout=604800):
'''
Loop over an execution module until a condition is met.
name
The name of the execution module
m_args
The execution module's positional arguments
m_kwargs
The execution module's keyword arguments
condition
The condition which must be met for the loop to break. This
should contain ``m_ret`` which is the return from the execution
module.
period
The number of seconds to wait between executions
timeout
The timeout in seconds
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if name not in __salt__:
ret['comment'] = 'Cannot find module {0}'.format(name)
return ret
if condition is None:
ret['comment'] = 'An exit condition must be specified'
return ret
if not isinstance(period, int):
ret['comment'] = 'Period must be specified as an integer in seconds'
return ret
if not isinstance(timeout, int):
ret['comment'] = 'Timeout must be specified as an integer in seconds'
return ret
if __opts__['test']:
ret['comment'] = 'The execution module {0} will be run'.format(name)
ret['result'] = None
return ret
if not m_args:
m_args = []
if not m_kwargs:
m_kwargs = {}
def timed_out():
if time.time() >= timeout:
return True
return False
timeout = time.time() + timeout
while not timed_out():
m_ret = __salt__[name](*m_args, **m_kwargs)
if eval(condition): # pylint: disable=W0123
ret['result'] = True
ret['comment'] = 'Condition {0} was met'.format(condition)
return ret
time.sleep(period)
ret['comment'] = 'Timed out while waiting for condition {0}'.format(condition)
return ret |
def get_images(self, results=15, start=0, license=None, cache=True):
"""Get a list of artist images
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
license (str): A string specifying the desired license type
Returns:
A list of image document dicts; list contains additional attributes 'start' and 'total'
Example:
>>> a = artist.Artist('Captain Beefheart')
>>> images = a.get_images(results=1)
>>> images.total
49
>>> images[0]['url']
u'http://c4.ac-images.myspacecdn.com/images01/5/l_e1a329cdfdb16a848288edc6d578730f.jpg'
>>>
"""
if cache and ('images' in self.cache) and results==15 and start==0 and license==None:
return self.cache['images']
else:
response = self.get_attribute('images', results=results, start=start, license=license)
total = response.get('total') or 0
if results==15 and start==0 and license==None:
self.cache['images'] = ResultList(response['images'], 0, total)
return ResultList(response['images'], start, total) | Get a list of artist images
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
license (str): A string specifying the desired license type
Returns:
A list of image document dicts; list contains additional attributes 'start' and 'total'
Example:
>>> a = artist.Artist('Captain Beefheart')
>>> images = a.get_images(results=1)
>>> images.total
49
>>> images[0]['url']
u'http://c4.ac-images.myspacecdn.com/images01/5/l_e1a329cdfdb16a848288edc6d578730f.jpg'
>>> | Below is the the instruction that describes the task:
### Input:
Get a list of artist images
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
license (str): A string specifying the desired license type
Returns:
A list of image document dicts; list contains additional attributes 'start' and 'total'
Example:
>>> a = artist.Artist('Captain Beefheart')
>>> images = a.get_images(results=1)
>>> images.total
49
>>> images[0]['url']
u'http://c4.ac-images.myspacecdn.com/images01/5/l_e1a329cdfdb16a848288edc6d578730f.jpg'
>>>
### Response:
def get_images(self, results=15, start=0, license=None, cache=True):
"""Get a list of artist images
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
license (str): A string specifying the desired license type
Returns:
A list of image document dicts; list contains additional attributes 'start' and 'total'
Example:
>>> a = artist.Artist('Captain Beefheart')
>>> images = a.get_images(results=1)
>>> images.total
49
>>> images[0]['url']
u'http://c4.ac-images.myspacecdn.com/images01/5/l_e1a329cdfdb16a848288edc6d578730f.jpg'
>>>
"""
if cache and ('images' in self.cache) and results==15 and start==0 and license==None:
return self.cache['images']
else:
response = self.get_attribute('images', results=results, start=start, license=license)
total = response.get('total') or 0
if results==15 and start==0 and license==None:
self.cache['images'] = ResultList(response['images'], 0, total)
return ResultList(response['images'], start, total) |
def write_points(self, data, *args, **kwargs):
"""Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
batch_size = kwargs.get('batch_size')
time_precision = kwargs.get('time_precision', 's')
if batch_size:
kwargs.pop('batch_size') # don't hand over to InfluxDBClient
for key, data_frame in data.items():
number_batches = int(math.ceil(
len(data_frame) / float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
outdata = [
self._convert_dataframe_to_json(
name=key,
dataframe=data_frame
.iloc[start_index:end_index].copy(),
time_precision=time_precision)]
InfluxDBClient.write_points(self, outdata, *args, **kwargs)
return True
outdata = [
self._convert_dataframe_to_json(name=key, dataframe=dataframe,
time_precision=time_precision)
for key, dataframe in data.items()]
return InfluxDBClient.write_points(self, outdata, *args, **kwargs) | Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int | Below is the the instruction that describes the task:
### Input:
Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
### Response:
def write_points(self, data, *args, **kwargs):
"""Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
batch_size = kwargs.get('batch_size')
time_precision = kwargs.get('time_precision', 's')
if batch_size:
kwargs.pop('batch_size') # don't hand over to InfluxDBClient
for key, data_frame in data.items():
number_batches = int(math.ceil(
len(data_frame) / float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
outdata = [
self._convert_dataframe_to_json(
name=key,
dataframe=data_frame
.iloc[start_index:end_index].copy(),
time_precision=time_precision)]
InfluxDBClient.write_points(self, outdata, *args, **kwargs)
return True
outdata = [
self._convert_dataframe_to_json(name=key, dataframe=dataframe,
time_precision=time_precision)
for key, dataframe in data.items()]
return InfluxDBClient.write_points(self, outdata, *args, **kwargs) |
def trapped_signals(cls, new_signal_handler):
"""A contextmanager which temporarily overrides signal handling."""
try:
previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
yield
finally:
cls.reset_signal_handler(previous_signal_handler) | A contextmanager which temporarily overrides signal handling. | Below is the the instruction that describes the task:
### Input:
A contextmanager which temporarily overrides signal handling.
### Response:
def trapped_signals(cls, new_signal_handler):
"""A contextmanager which temporarily overrides signal handling."""
try:
previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
yield
finally:
cls.reset_signal_handler(previous_signal_handler) |
def list(self, where):
'''
List the current schedule items
'''
if where == 'pillar':
schedule = self._get_schedule(include_opts=False)
elif where == 'opts':
schedule = self._get_schedule(include_pillar=False)
else:
schedule = self._get_schedule()
# Fire the complete event back along with the list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': schedule},
tag='/salt/minion/minion_schedule_list_complete') | List the current schedule items | Below is the the instruction that describes the task:
### Input:
List the current schedule items
### Response:
def list(self, where):
'''
List the current schedule items
'''
if where == 'pillar':
schedule = self._get_schedule(include_opts=False)
elif where == 'opts':
schedule = self._get_schedule(include_pillar=False)
else:
schedule = self._get_schedule()
# Fire the complete event back along with the list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': schedule},
tag='/salt/minion/minion_schedule_list_complete') |
def get_most_recent_event(self, originator_id, lt=None, lte=None):
"""
Gets a domain event from the sequence identified by `originator_id`
at the highest position.
:param originator_id: ID of a sequence of events
:param lt: get highest before this position
:param lte: get highest at or before this position
:return: domain event
"""
events = self.get_domain_events(originator_id=originator_id, lt=lt, lte=lte, limit=1, is_ascending=False)
events = list(events)
try:
return events[0]
except IndexError:
pass | Gets a domain event from the sequence identified by `originator_id`
at the highest position.
:param originator_id: ID of a sequence of events
:param lt: get highest before this position
:param lte: get highest at or before this position
:return: domain event | Below is the the instruction that describes the task:
### Input:
Gets a domain event from the sequence identified by `originator_id`
at the highest position.
:param originator_id: ID of a sequence of events
:param lt: get highest before this position
:param lte: get highest at or before this position
:return: domain event
### Response:
def get_most_recent_event(self, originator_id, lt=None, lte=None):
"""
Gets a domain event from the sequence identified by `originator_id`
at the highest position.
:param originator_id: ID of a sequence of events
:param lt: get highest before this position
:param lte: get highest at or before this position
:return: domain event
"""
events = self.get_domain_events(originator_id=originator_id, lt=lt, lte=lte, limit=1, is_ascending=False)
events = list(events)
try:
return events[0]
except IndexError:
pass |
def filter(self, **search_args):
"""
Get a filtered list of resources
:param search_args: To be translated into ?arg1=value1&arg2=value2...
:return: A list of resources
"""
search_args = search_args or {}
raw_resources = []
for url, paginator_params in self.paginator.get_urls(self.get_collection_endpoint()):
search_args.update(paginator_params)
response = self.paginator.process_response(self.send(url, "get", params=search_args))
raw_resources += self.client.get_response_data(response, self.Meta.parse_json)[self.json_collection_attribute] if self.json_collection_attribute is not None else self.client.get_response_data(response, self.Meta.parse_json)
resources = []
for raw_resource in raw_resources:
try:
resource = self.resource_class(self.client)
except (ValueError, TypeError):
continue
else:
resource.update_from_dict(raw_resource)
resources.append(resource)
return resources | Get a filtered list of resources
:param search_args: To be translated into ?arg1=value1&arg2=value2...
:return: A list of resources | Below is the the instruction that describes the task:
### Input:
Get a filtered list of resources
:param search_args: To be translated into ?arg1=value1&arg2=value2...
:return: A list of resources
### Response:
def filter(self, **search_args):
"""
Get a filtered list of resources
:param search_args: To be translated into ?arg1=value1&arg2=value2...
:return: A list of resources
"""
search_args = search_args or {}
raw_resources = []
for url, paginator_params in self.paginator.get_urls(self.get_collection_endpoint()):
search_args.update(paginator_params)
response = self.paginator.process_response(self.send(url, "get", params=search_args))
raw_resources += self.client.get_response_data(response, self.Meta.parse_json)[self.json_collection_attribute] if self.json_collection_attribute is not None else self.client.get_response_data(response, self.Meta.parse_json)
resources = []
for raw_resource in raw_resources:
try:
resource = self.resource_class(self.client)
except (ValueError, TypeError):
continue
else:
resource.update_from_dict(raw_resource)
resources.append(resource)
return resources |
def memoize(func=None, maxlen=None):
"""Cache a function's return value each time it is called.
This function serves as a function decorator to provide a caching of
evaluated fitness values. If called later with the same arguments,
the cached value is returned instead of being re-evaluated.
This decorator assumes that candidates are individually pickleable,
and their pickled values are used for hashing into a dictionary. It
should be used when evaluating an *expensive* fitness
function to avoid costly re-evaluation of those fitnesses. The
typical usage is as follows::
@memoize
def expensive_fitness_function(candidates, args):
# Implementation of expensive fitness calculation
pass
It is also possible to provide the named argument *maxlen*, which
specifies the size of the memoization cache to use. (If *maxlen* is
``None``, then an unbounded cache is used.) Once the size of the cache
has reached *maxlen*, the oldest element is replaced by the newest
element in order to keep the size constant. This usage is as follows::
@memoize(maxlen=100)
def expensive_fitness_function(candidates, args):
# Implementation of expensive fitness calculation
pass
.. warning:: The ``maxlen`` parameter must be passed as a named keyword
argument, or an ``AttributeError`` will be raised (e.g., saying
``@memoize(100)`` will cause an error).
"""
if func is not None:
cache = BoundedOrderedDict(maxlen=maxlen)
@functools.wraps(func)
def memo_target(candidates, args):
fitness = []
for candidate in candidates:
lookup_value = pickle.dumps(candidate, 1)
if lookup_value not in cache:
cache[lookup_value] = func([candidate], args)[0]
fitness.append(cache[lookup_value])
return fitness
return memo_target
else:
def memoize_factory(func):
return memoize(func, maxlen=maxlen)
return memoize_factory | Cache a function's return value each time it is called.
This function serves as a function decorator to provide a caching of
evaluated fitness values. If called later with the same arguments,
the cached value is returned instead of being re-evaluated.
This decorator assumes that candidates are individually pickleable,
and their pickled values are used for hashing into a dictionary. It
should be used when evaluating an *expensive* fitness
function to avoid costly re-evaluation of those fitnesses. The
typical usage is as follows::
@memoize
def expensive_fitness_function(candidates, args):
# Implementation of expensive fitness calculation
pass
It is also possible to provide the named argument *maxlen*, which
specifies the size of the memoization cache to use. (If *maxlen* is
``None``, then an unbounded cache is used.) Once the size of the cache
has reached *maxlen*, the oldest element is replaced by the newest
element in order to keep the size constant. This usage is as follows::
@memoize(maxlen=100)
def expensive_fitness_function(candidates, args):
# Implementation of expensive fitness calculation
pass
.. warning:: The ``maxlen`` parameter must be passed as a named keyword
argument, or an ``AttributeError`` will be raised (e.g., saying
``@memoize(100)`` will cause an error). | Below is the the instruction that describes the task:
### Input:
Cache a function's return value each time it is called.
This function serves as a function decorator to provide a caching of
evaluated fitness values. If called later with the same arguments,
the cached value is returned instead of being re-evaluated.
This decorator assumes that candidates are individually pickleable,
and their pickled values are used for hashing into a dictionary. It
should be used when evaluating an *expensive* fitness
function to avoid costly re-evaluation of those fitnesses. The
typical usage is as follows::
@memoize
def expensive_fitness_function(candidates, args):
# Implementation of expensive fitness calculation
pass
It is also possible to provide the named argument *maxlen*, which
specifies the size of the memoization cache to use. (If *maxlen* is
``None``, then an unbounded cache is used.) Once the size of the cache
has reached *maxlen*, the oldest element is replaced by the newest
element in order to keep the size constant. This usage is as follows::
@memoize(maxlen=100)
def expensive_fitness_function(candidates, args):
# Implementation of expensive fitness calculation
pass
.. warning:: The ``maxlen`` parameter must be passed as a named keyword
argument, or an ``AttributeError`` will be raised (e.g., saying
``@memoize(100)`` will cause an error).
### Response:
def memoize(func=None, maxlen=None):
"""Cache a function's return value each time it is called.
This function serves as a function decorator to provide a caching of
evaluated fitness values. If called later with the same arguments,
the cached value is returned instead of being re-evaluated.
This decorator assumes that candidates are individually pickleable,
and their pickled values are used for hashing into a dictionary. It
should be used when evaluating an *expensive* fitness
function to avoid costly re-evaluation of those fitnesses. The
typical usage is as follows::
@memoize
def expensive_fitness_function(candidates, args):
# Implementation of expensive fitness calculation
pass
It is also possible to provide the named argument *maxlen*, which
specifies the size of the memoization cache to use. (If *maxlen* is
``None``, then an unbounded cache is used.) Once the size of the cache
has reached *maxlen*, the oldest element is replaced by the newest
element in order to keep the size constant. This usage is as follows::
@memoize(maxlen=100)
def expensive_fitness_function(candidates, args):
# Implementation of expensive fitness calculation
pass
.. warning:: The ``maxlen`` parameter must be passed as a named keyword
argument, or an ``AttributeError`` will be raised (e.g., saying
``@memoize(100)`` will cause an error).
"""
if func is not None:
cache = BoundedOrderedDict(maxlen=maxlen)
@functools.wraps(func)
def memo_target(candidates, args):
fitness = []
for candidate in candidates:
lookup_value = pickle.dumps(candidate, 1)
if lookup_value not in cache:
cache[lookup_value] = func([candidate], args)[0]
fitness.append(cache[lookup_value])
return fitness
return memo_target
else:
def memoize_factory(func):
return memoize(func, maxlen=maxlen)
return memoize_factory |
def get_HEAD_SHA1(git_dir):
"""Not locked!
"""
head_file = os.path.join(git_dir, 'HEAD')
with open(head_file, 'r') as hf:
head_contents = hf.read().strip()
assert head_contents.startswith('ref: ')
ref_filename = head_contents[5:] # strip off "ref: "
real_ref = os.path.join(git_dir, ref_filename)
with open(real_ref, 'r') as rf:
return rf.read().strip() | Not locked! | Below is the the instruction that describes the task:
### Input:
Not locked!
### Response:
def get_HEAD_SHA1(git_dir):
"""Not locked!
"""
head_file = os.path.join(git_dir, 'HEAD')
with open(head_file, 'r') as hf:
head_contents = hf.read().strip()
assert head_contents.startswith('ref: ')
ref_filename = head_contents[5:] # strip off "ref: "
real_ref = os.path.join(git_dir, ref_filename)
with open(real_ref, 'r') as rf:
return rf.read().strip() |
def qteInsertMode(self, pos: int, mode: str, value):
"""
Insert ``mode`` at position ``pos``.
If ``pos`` is negative then this is equivalent to ``pos=0``. If it
is larger than the number of modes in the list then it is appended
as the last element.
|Args|
* ``pos`` (**int**): insertion point.
* ``mode`` (**str**): name of mode.
* ``value`` (**object**) value associated with ``mode``.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Add the label to the list.
label = self._qteGetLabelInstance()
label.setText(value)
self._qteModeList.insert(pos, (mode, value, label))
self._qteUpdateLabelWidths() | Insert ``mode`` at position ``pos``.
If ``pos`` is negative then this is equivalent to ``pos=0``. If it
is larger than the number of modes in the list then it is appended
as the last element.
|Args|
* ``pos`` (**int**): insertion point.
* ``mode`` (**str**): name of mode.
* ``value`` (**object**) value associated with ``mode``.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type. | Below is the the instruction that describes the task:
### Input:
Insert ``mode`` at position ``pos``.
If ``pos`` is negative then this is equivalent to ``pos=0``. If it
is larger than the number of modes in the list then it is appended
as the last element.
|Args|
* ``pos`` (**int**): insertion point.
* ``mode`` (**str**): name of mode.
* ``value`` (**object**) value associated with ``mode``.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
### Response:
def qteInsertMode(self, pos: int, mode: str, value):
"""
Insert ``mode`` at position ``pos``.
If ``pos`` is negative then this is equivalent to ``pos=0``. If it
is larger than the number of modes in the list then it is appended
as the last element.
|Args|
* ``pos`` (**int**): insertion point.
* ``mode`` (**str**): name of mode.
* ``value`` (**object**) value associated with ``mode``.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Add the label to the list.
label = self._qteGetLabelInstance()
label.setText(value)
self._qteModeList.insert(pos, (mode, value, label))
self._qteUpdateLabelWidths() |
def copyto(self, src, where=None):
"""Emulates function `copyto` in NumPy.
Parameters
----------
where: (N,) bool ndarray
True if particle n in src must be copied.
src: (N,) `ThetaParticles` object
source
for each n such that where[n] is True, copy particle n in src
into self (at location n)
"""
for k in self.containers:
v = self.__dict__[k]
if isinstance(v, np.ndarray):
np.copyto(v, src.__dict__[k], where=where)
else:
v.copyto(src.__dict__[k], where=where) | Emulates function `copyto` in NumPy.
Parameters
----------
where: (N,) bool ndarray
True if particle n in src must be copied.
src: (N,) `ThetaParticles` object
source
for each n such that where[n] is True, copy particle n in src
into self (at location n) | Below is the the instruction that describes the task:
### Input:
Emulates function `copyto` in NumPy.
Parameters
----------
where: (N,) bool ndarray
True if particle n in src must be copied.
src: (N,) `ThetaParticles` object
source
for each n such that where[n] is True, copy particle n in src
into self (at location n)
### Response:
def copyto(self, src, where=None):
"""Emulates function `copyto` in NumPy.
Parameters
----------
where: (N,) bool ndarray
True if particle n in src must be copied.
src: (N,) `ThetaParticles` object
source
for each n such that where[n] is True, copy particle n in src
into self (at location n)
"""
for k in self.containers:
v = self.__dict__[k]
if isinstance(v, np.ndarray):
np.copyto(v, src.__dict__[k], where=where)
else:
v.copyto(src.__dict__[k], where=where) |
def get_yaml_schema(self):
"""GetYamlSchema.
[Preview API]
:rtype: object
"""
response = self._send(http_method='GET',
location_id='1f9990b9-1dba-441f-9c2e-6485888c42b6',
version='5.1-preview.1')
return self._deserialize('object', response) | GetYamlSchema.
[Preview API]
:rtype: object | Below is the the instruction that describes the task:
### Input:
GetYamlSchema.
[Preview API]
:rtype: object
### Response:
def get_yaml_schema(self):
"""GetYamlSchema.
[Preview API]
:rtype: object
"""
response = self._send(http_method='GET',
location_id='1f9990b9-1dba-441f-9c2e-6485888c42b6',
version='5.1-preview.1')
return self._deserialize('object', response) |
def install(self, host):
"""Setup common to all Qt-based hosts"""
print("Installing..")
if self._state["installed"]:
return
if self.is_headless():
log.info("Headless host")
return
print("aboutToQuit..")
self.app.aboutToQuit.connect(self._on_application_quit)
if host == "Maya":
print("Maya host..")
window = {
widget.objectName(): widget
for widget in self.app.topLevelWidgets()
}["MayaWindow"]
else:
window = self.find_window()
# Install event filter
print("event filter..")
event_filter = self.EventFilter(window)
window.installEventFilter(event_filter)
for signal in SIGNALS_TO_REMOVE_EVENT_FILTER:
pyblish.api.register_callback(signal, self.uninstall)
log.info("Installed event filter")
self.window = window
self._state["installed"] = True
self._state["eventFilter"] = event_filter | Setup common to all Qt-based hosts | Below is the the instruction that describes the task:
### Input:
Setup common to all Qt-based hosts
### Response:
def install(self, host):
"""Setup common to all Qt-based hosts"""
print("Installing..")
if self._state["installed"]:
return
if self.is_headless():
log.info("Headless host")
return
print("aboutToQuit..")
self.app.aboutToQuit.connect(self._on_application_quit)
if host == "Maya":
print("Maya host..")
window = {
widget.objectName(): widget
for widget in self.app.topLevelWidgets()
}["MayaWindow"]
else:
window = self.find_window()
# Install event filter
print("event filter..")
event_filter = self.EventFilter(window)
window.installEventFilter(event_filter)
for signal in SIGNALS_TO_REMOVE_EVENT_FILTER:
pyblish.api.register_callback(signal, self.uninstall)
log.info("Installed event filter")
self.window = window
self._state["installed"] = True
self._state["eventFilter"] = event_filter |
def riak_http_search_query(self, solr_core, solr_params, count_deleted=False):
"""
This method is for advanced SOLR queries. Riak HTTP search query endpoint,
sends solr_params and query string as a proxy and returns solr reponse.
Args:
solr_core (str): solr core on which query will be executed
solr_params (str): solr specific query params, such as rows, start, fl, df, wt etc..
count_deleted (bool): ignore deleted records or not
Returns:
(dict): dict of solr response
"""
# append current _solr_query params
sq = ["%s%%3A%s" % (q[0], q[1]) for q in self._solr_query]
if not count_deleted:
sq.append("-deleted%3ATrue")
search_host = "http://%s:%s/search/query/%s?wt=json&q=%s&%s" % (
settings.RIAK_SERVER,
settings.RIAK_HTTP_PORT,
solr_core,
"+AND+".join(sq),
solr_params
)
return json.loads(bytes_to_str(urlopen(search_host).read())) | This method is for advanced SOLR queries. Riak HTTP search query endpoint,
sends solr_params and query string as a proxy and returns solr reponse.
Args:
solr_core (str): solr core on which query will be executed
solr_params (str): solr specific query params, such as rows, start, fl, df, wt etc..
count_deleted (bool): ignore deleted records or not
Returns:
(dict): dict of solr response | Below is the the instruction that describes the task:
### Input:
This method is for advanced SOLR queries. Riak HTTP search query endpoint,
sends solr_params and query string as a proxy and returns solr reponse.
Args:
solr_core (str): solr core on which query will be executed
solr_params (str): solr specific query params, such as rows, start, fl, df, wt etc..
count_deleted (bool): ignore deleted records or not
Returns:
(dict): dict of solr response
### Response:
def riak_http_search_query(self, solr_core, solr_params, count_deleted=False):
"""
This method is for advanced SOLR queries. Riak HTTP search query endpoint,
sends solr_params and query string as a proxy and returns solr reponse.
Args:
solr_core (str): solr core on which query will be executed
solr_params (str): solr specific query params, such as rows, start, fl, df, wt etc..
count_deleted (bool): ignore deleted records or not
Returns:
(dict): dict of solr response
"""
# append current _solr_query params
sq = ["%s%%3A%s" % (q[0], q[1]) for q in self._solr_query]
if not count_deleted:
sq.append("-deleted%3ATrue")
search_host = "http://%s:%s/search/query/%s?wt=json&q=%s&%s" % (
settings.RIAK_SERVER,
settings.RIAK_HTTP_PORT,
solr_core,
"+AND+".join(sq),
solr_params
)
return json.loads(bytes_to_str(urlopen(search_host).read())) |
def build_extension(extensions: Sequence[ExtensionHeader]) -> str:
"""
Unparse a ``Sec-WebSocket-Extensions`` header.
This is the reverse of :func:`parse_extension`.
"""
return ", ".join(
build_extension_item(name, parameters) for name, parameters in extensions
) | Unparse a ``Sec-WebSocket-Extensions`` header.
This is the reverse of :func:`parse_extension`. | Below is the the instruction that describes the task:
### Input:
Unparse a ``Sec-WebSocket-Extensions`` header.
This is the reverse of :func:`parse_extension`.
### Response:
def build_extension(extensions: Sequence[ExtensionHeader]) -> str:
"""
Unparse a ``Sec-WebSocket-Extensions`` header.
This is the reverse of :func:`parse_extension`.
"""
return ", ".join(
build_extension_item(name, parameters) for name, parameters in extensions
) |
def get_found_includes(self, env, scanner, path):
"""Return the included implicit dependencies in this file.
Cache results so we only scan the file once per path
regardless of how many times this information is requested.
"""
memo_key = (id(env), id(scanner), path)
try:
memo_dict = self._memo['get_found_includes']
except KeyError:
memo_dict = {}
self._memo['get_found_includes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
if scanner:
result = [n.disambiguate() for n in scanner(self, env, path)]
else:
result = []
memo_dict[memo_key] = result
return result | Return the included implicit dependencies in this file.
Cache results so we only scan the file once per path
regardless of how many times this information is requested. | Below is the the instruction that describes the task:
### Input:
Return the included implicit dependencies in this file.
Cache results so we only scan the file once per path
regardless of how many times this information is requested.
### Response:
def get_found_includes(self, env, scanner, path):
"""Return the included implicit dependencies in this file.
Cache results so we only scan the file once per path
regardless of how many times this information is requested.
"""
memo_key = (id(env), id(scanner), path)
try:
memo_dict = self._memo['get_found_includes']
except KeyError:
memo_dict = {}
self._memo['get_found_includes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
if scanner:
result = [n.disambiguate() for n in scanner(self, env, path)]
else:
result = []
memo_dict[memo_key] = result
return result |
def iget_batches(task_ids, batch_size=10):
"""Yield out a map of the keys and futures in batches of the batch size
passed in.
"""
make_key = lambda _id: ndb.Key(FuriousAsyncMarker, _id)
for keys in i_batch(imap(make_key, task_ids), batch_size):
yield izip(keys, ndb.get_multi_async(keys)) | Yield out a map of the keys and futures in batches of the batch size
passed in. | Below is the the instruction that describes the task:
### Input:
Yield out a map of the keys and futures in batches of the batch size
passed in.
### Response:
def iget_batches(task_ids, batch_size=10):
"""Yield out a map of the keys and futures in batches of the batch size
passed in.
"""
make_key = lambda _id: ndb.Key(FuriousAsyncMarker, _id)
for keys in i_batch(imap(make_key, task_ids), batch_size):
yield izip(keys, ndb.get_multi_async(keys)) |
def save(self, name):
"""
Save the string buffer to a file. Finalizes prior to saving.
:param name: File path.
:type name: unicode | str
"""
self.finalize()
with open(name, 'wb+') as f:
if six.PY3:
f.write(self.fileobj.getbuffer())
else:
f.write(self.fileobj.getvalue().encode('utf-8')) | Save the string buffer to a file. Finalizes prior to saving.
:param name: File path.
:type name: unicode | str | Below is the the instruction that describes the task:
### Input:
Save the string buffer to a file. Finalizes prior to saving.
:param name: File path.
:type name: unicode | str
### Response:
def save(self, name):
"""
Save the string buffer to a file. Finalizes prior to saving.
:param name: File path.
:type name: unicode | str
"""
self.finalize()
with open(name, 'wb+') as f:
if six.PY3:
f.write(self.fileobj.getbuffer())
else:
f.write(self.fileobj.getvalue().encode('utf-8')) |
def get_image_size(self, token, resolution=0):
"""
Return the size of the volume (3D). Convenient for when you want
to download the entirety of a dataset.
Arguments:
token (str): The token for which to find the dataset image bounds
resolution (int : 0): The resolution at which to get image bounds.
Defaults to 0, to get the largest area available.
Returns:
int[3]: The size of the bounds. Should == get_volume.shape
Raises:
RemoteDataNotFoundError: If the token is invalid, or if the
metadata at that resolution is unavailable in projinfo.
"""
info = self.get_proj_info(token)
res = str(resolution)
if res not in info['dataset']['imagesize']:
raise RemoteDataNotFoundError("Resolution " + res +
" is not available.")
return info['dataset']['imagesize'][str(resolution)] | Return the size of the volume (3D). Convenient for when you want
to download the entirety of a dataset.
Arguments:
token (str): The token for which to find the dataset image bounds
resolution (int : 0): The resolution at which to get image bounds.
Defaults to 0, to get the largest area available.
Returns:
int[3]: The size of the bounds. Should == get_volume.shape
Raises:
RemoteDataNotFoundError: If the token is invalid, or if the
metadata at that resolution is unavailable in projinfo. | Below is the the instruction that describes the task:
### Input:
Return the size of the volume (3D). Convenient for when you want
to download the entirety of a dataset.
Arguments:
token (str): The token for which to find the dataset image bounds
resolution (int : 0): The resolution at which to get image bounds.
Defaults to 0, to get the largest area available.
Returns:
int[3]: The size of the bounds. Should == get_volume.shape
Raises:
RemoteDataNotFoundError: If the token is invalid, or if the
metadata at that resolution is unavailable in projinfo.
### Response:
def get_image_size(self, token, resolution=0):
"""
Return the size of the volume (3D). Convenient for when you want
to download the entirety of a dataset.
Arguments:
token (str): The token for which to find the dataset image bounds
resolution (int : 0): The resolution at which to get image bounds.
Defaults to 0, to get the largest area available.
Returns:
int[3]: The size of the bounds. Should == get_volume.shape
Raises:
RemoteDataNotFoundError: If the token is invalid, or if the
metadata at that resolution is unavailable in projinfo.
"""
info = self.get_proj_info(token)
res = str(resolution)
if res not in info['dataset']['imagesize']:
raise RemoteDataNotFoundError("Resolution " + res +
" is not available.")
return info['dataset']['imagesize'][str(resolution)] |
def des_cbc_pkcs5_encrypt(key, data, iv):
"""
Encrypts plaintext using DES with a 56 bit key
:param key:
The encryption key - a byte string 8 bytes long (includes error correction bits)
:param data:
The plaintext - a byte string
:param iv:
The 8-byte initialization vector to use - a byte string - set as None
to generate an appropriate one
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A tuple of two byte strings (iv, ciphertext)
"""
if len(key) != 8:
raise ValueError(pretty_message(
'''
key must be 8 bytes (56 bits + 8 parity bits) long - is %s
''',
len(key)
))
if not iv:
iv = rand_bytes(8)
elif len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return (iv, _encrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key)) | Encrypts plaintext using DES with a 56 bit key
:param key:
The encryption key - a byte string 8 bytes long (includes error correction bits)
:param data:
The plaintext - a byte string
:param iv:
The 8-byte initialization vector to use - a byte string - set as None
to generate an appropriate one
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A tuple of two byte strings (iv, ciphertext) | Below is the the instruction that describes the task:
### Input:
Encrypts plaintext using DES with a 56 bit key
:param key:
The encryption key - a byte string 8 bytes long (includes error correction bits)
:param data:
The plaintext - a byte string
:param iv:
The 8-byte initialization vector to use - a byte string - set as None
to generate an appropriate one
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A tuple of two byte strings (iv, ciphertext)
### Response:
def des_cbc_pkcs5_encrypt(key, data, iv):
"""
Encrypts plaintext using DES with a 56 bit key
:param key:
The encryption key - a byte string 8 bytes long (includes error correction bits)
:param data:
The plaintext - a byte string
:param iv:
The 8-byte initialization vector to use - a byte string - set as None
to generate an appropriate one
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A tuple of two byte strings (iv, ciphertext)
"""
if len(key) != 8:
raise ValueError(pretty_message(
'''
key must be 8 bytes (56 bits + 8 parity bits) long - is %s
''',
len(key)
))
if not iv:
iv = rand_bytes(8)
elif len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return (iv, _encrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key)) |
def ConsultarConstanciaCTGPDF(self, numero_ctg=None,
archivo="constancia.pdf"):
"Operación Consultar Constancia de CTG en PDF"
ret = self.client.consultarConstanciaCTGPDF(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
ctg=numero_ctg,
))['response']
self.__analizar_errores(ret)
datos = base64.b64decode(ret.get('archivo', ""))
f = open(archivo, "wb")
f.write(datos)
f.close()
return True | Operación Consultar Constancia de CTG en PDF | Below is the the instruction that describes the task:
### Input:
Operación Consultar Constancia de CTG en PDF
### Response:
def ConsultarConstanciaCTGPDF(self, numero_ctg=None,
archivo="constancia.pdf"):
"Operación Consultar Constancia de CTG en PDF"
ret = self.client.consultarConstanciaCTGPDF(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
ctg=numero_ctg,
))['response']
self.__analizar_errores(ret)
datos = base64.b64decode(ret.get('archivo', ""))
f = open(archivo, "wb")
f.write(datos)
f.close()
return True |
def combo_serve(request, path, client):
"""
Handles generating a 'combo' file for the given path. This is similar to
what happens when we upload to S3. Processors are applied, and we get
the value that we would if we were serving from S3. This is a good way
to make sure combo files work as intended before rolling out
to production.
"""
joinfile = path
sourcefiles = msettings['JOINED'][path]
# Generate the combo file as a string.
combo_data, dirname = combine_files(joinfile, sourcefiles, client)
if path.endswith('.css'):
mime_type = 'text/css'
elif joinfile.endswith('.js'):
mime_type = 'application/javascript'
return HttpResponse(combo_data, mimetype=mime_type) | Handles generating a 'combo' file for the given path. This is similar to
what happens when we upload to S3. Processors are applied, and we get
the value that we would if we were serving from S3. This is a good way
to make sure combo files work as intended before rolling out
to production. | Below is the the instruction that describes the task:
### Input:
Handles generating a 'combo' file for the given path. This is similar to
what happens when we upload to S3. Processors are applied, and we get
the value that we would if we were serving from S3. This is a good way
to make sure combo files work as intended before rolling out
to production.
### Response:
def combo_serve(request, path, client):
"""
Handles generating a 'combo' file for the given path. This is similar to
what happens when we upload to S3. Processors are applied, and we get
the value that we would if we were serving from S3. This is a good way
to make sure combo files work as intended before rolling out
to production.
"""
joinfile = path
sourcefiles = msettings['JOINED'][path]
# Generate the combo file as a string.
combo_data, dirname = combine_files(joinfile, sourcefiles, client)
if path.endswith('.css'):
mime_type = 'text/css'
elif joinfile.endswith('.js'):
mime_type = 'application/javascript'
return HttpResponse(combo_data, mimetype=mime_type) |
def write_tsv(self, path):
"""Write the database to a tab-delimited text file.
Parameters
----------
path: str
The path name of the file.
Returns
-------
None
"""
with open(path, 'wb') as ofh:
writer = csv.writer(
ofh, dialect='excel-tab',
quoting=csv.QUOTE_NONE, lineterminator=os.linesep
)
for gs in self._gene_sets.values():
writer.writerow(gs.to_list()) | Write the database to a tab-delimited text file.
Parameters
----------
path: str
The path name of the file.
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
Write the database to a tab-delimited text file.
Parameters
----------
path: str
The path name of the file.
Returns
-------
None
### Response:
def write_tsv(self, path):
"""Write the database to a tab-delimited text file.
Parameters
----------
path: str
The path name of the file.
Returns
-------
None
"""
with open(path, 'wb') as ofh:
writer = csv.writer(
ofh, dialect='excel-tab',
quoting=csv.QUOTE_NONE, lineterminator=os.linesep
)
for gs in self._gene_sets.values():
writer.writerow(gs.to_list()) |
def _get_help_record(opt):
"""Re-implementation of click.Opt.get_help_record.
The variant of 'get_help_record' found in Click makes uses of slashes to
separate multiple opts, and formats option arguments using upper case. This
is not compatible with Sphinx's 'option' directive, which expects
comma-separated opts and option arguments surrounded by angle brackets [1].
[1] http://www.sphinx-doc.org/en/stable/domains.html#directive-option
"""
def _write_opts(opts):
rv, _ = click.formatting.join_options(opts)
if not opt.is_flag and not opt.count:
rv += ' <{}>'.format(opt.name)
return rv
rv = [_write_opts(opt.opts)]
if opt.secondary_opts:
rv.append(_write_opts(opt.secondary_opts))
help = opt.help or ''
extra = []
if opt.default is not None and opt.show_default:
extra.append(
'default: %s' % (', '.join('%s' % d for d in opt.default)
if isinstance(opt.default,
(list, tuple)) else opt.default, ))
if opt.required:
extra.append('required')
if extra:
help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
return ', '.join(rv), help | Re-implementation of click.Opt.get_help_record.
The variant of 'get_help_record' found in Click makes uses of slashes to
separate multiple opts, and formats option arguments using upper case. This
is not compatible with Sphinx's 'option' directive, which expects
comma-separated opts and option arguments surrounded by angle brackets [1].
[1] http://www.sphinx-doc.org/en/stable/domains.html#directive-option | Below is the the instruction that describes the task:
### Input:
Re-implementation of click.Opt.get_help_record.
The variant of 'get_help_record' found in Click makes uses of slashes to
separate multiple opts, and formats option arguments using upper case. This
is not compatible with Sphinx's 'option' directive, which expects
comma-separated opts and option arguments surrounded by angle brackets [1].
[1] http://www.sphinx-doc.org/en/stable/domains.html#directive-option
### Response:
def _get_help_record(opt):
"""Re-implementation of click.Opt.get_help_record.
The variant of 'get_help_record' found in Click makes uses of slashes to
separate multiple opts, and formats option arguments using upper case. This
is not compatible with Sphinx's 'option' directive, which expects
comma-separated opts and option arguments surrounded by angle brackets [1].
[1] http://www.sphinx-doc.org/en/stable/domains.html#directive-option
"""
def _write_opts(opts):
rv, _ = click.formatting.join_options(opts)
if not opt.is_flag and not opt.count:
rv += ' <{}>'.format(opt.name)
return rv
rv = [_write_opts(opt.opts)]
if opt.secondary_opts:
rv.append(_write_opts(opt.secondary_opts))
help = opt.help or ''
extra = []
if opt.default is not None and opt.show_default:
extra.append(
'default: %s' % (', '.join('%s' % d for d in opt.default)
if isinstance(opt.default,
(list, tuple)) else opt.default, ))
if opt.required:
extra.append('required')
if extra:
help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
return ', '.join(rv), help |
def sample_histograms(fig, input_sample, problem, param_dict):
'''Plots a set of subplots of histograms of the input sample
'''
num_vars = problem['num_vars']
names = problem['names']
framing = 101 + (num_vars * 10)
# Find number of levels
num_levels = len(set(input_sample[:, 1]))
out = []
for variable in range(num_vars):
ax = fig.add_subplot(framing + variable)
out.append(ax.hist(input_sample[:, variable],
bins=num_levels,
normed=False,
label=None,
**param_dict))
ax.set_title('%s' % (names[variable]))
ax.tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off') # labels along the bottom edge off)
if variable > 0:
ax.tick_params(axis='y', # changes apply to the y-axis
which='both', # both major and minor ticks affected
labelleft='off') # labels along the left edge off)
return out | Plots a set of subplots of histograms of the input sample | Below is the the instruction that describes the task:
### Input:
Plots a set of subplots of histograms of the input sample
### Response:
def sample_histograms(fig, input_sample, problem, param_dict):
'''Plots a set of subplots of histograms of the input sample
'''
num_vars = problem['num_vars']
names = problem['names']
framing = 101 + (num_vars * 10)
# Find number of levels
num_levels = len(set(input_sample[:, 1]))
out = []
for variable in range(num_vars):
ax = fig.add_subplot(framing + variable)
out.append(ax.hist(input_sample[:, variable],
bins=num_levels,
normed=False,
label=None,
**param_dict))
ax.set_title('%s' % (names[variable]))
ax.tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off') # labels along the bottom edge off)
if variable > 0:
ax.tick_params(axis='y', # changes apply to the y-axis
which='both', # both major and minor ticks affected
labelleft='off') # labels along the left edge off)
return out |
def new_item(self, hash_key, range_key=None, attrs=None):
"""
Return an new, unsaved Item which can later be PUT to
Amazon DynamoDB.
"""
return Item(self, hash_key, range_key, attrs) | Return an new, unsaved Item which can later be PUT to
Amazon DynamoDB. | Below is the the instruction that describes the task:
### Input:
Return an new, unsaved Item which can later be PUT to
Amazon DynamoDB.
### Response:
def new_item(self, hash_key, range_key=None, attrs=None):
"""
Return an new, unsaved Item which can later be PUT to
Amazon DynamoDB.
"""
return Item(self, hash_key, range_key, attrs) |
def handle_mark_read_request(cls, request, message, dispatch, hash_is_valid, redirect_to):
"""Handles a request to mark a message as read.
:param Request request: Request instance
:param Message message: Message model instance
:param Dispatch dispatch: Dispatch model instance
:param bool hash_is_valid: Flag indicating that user supplied request signature is correct
:param str redirect_to: Redirection URL
:rtype: list
"""
if hash_is_valid:
dispatch.mark_read()
dispatch.save()
signal = sig_mark_read_success
else:
signal = sig_mark_read_failed
signal.send(cls, request=request, message=message, dispatch=dispatch)
return redirect(redirect_to) | Handles a request to mark a message as read.
:param Request request: Request instance
:param Message message: Message model instance
:param Dispatch dispatch: Dispatch model instance
:param bool hash_is_valid: Flag indicating that user supplied request signature is correct
:param str redirect_to: Redirection URL
:rtype: list | Below is the the instruction that describes the task:
### Input:
Handles a request to mark a message as read.
:param Request request: Request instance
:param Message message: Message model instance
:param Dispatch dispatch: Dispatch model instance
:param bool hash_is_valid: Flag indicating that user supplied request signature is correct
:param str redirect_to: Redirection URL
:rtype: list
### Response:
def handle_mark_read_request(cls, request, message, dispatch, hash_is_valid, redirect_to):
"""Handles a request to mark a message as read.
:param Request request: Request instance
:param Message message: Message model instance
:param Dispatch dispatch: Dispatch model instance
:param bool hash_is_valid: Flag indicating that user supplied request signature is correct
:param str redirect_to: Redirection URL
:rtype: list
"""
if hash_is_valid:
dispatch.mark_read()
dispatch.save()
signal = sig_mark_read_success
else:
signal = sig_mark_read_failed
signal.send(cls, request=request, message=message, dispatch=dispatch)
return redirect(redirect_to) |
def update_install_json():
"""Update the install.json configuration file if exists."""
if not os.path.isfile('install.json'):
return
with open('install.json', 'r') as f:
install_json = json.load(f)
if install_json.get('programMain'):
install_json['programMain'] = 'run'
# update features
install_json['features'] = ['aotExecutionEnabled', 'appBuilderCompliant', 'secureParams']
with open('install.json', 'w') as f:
json.dump(install_json, f, indent=2, sort_keys=True) | Update the install.json configuration file if exists. | Below is the the instruction that describes the task:
### Input:
Update the install.json configuration file if exists.
### Response:
def update_install_json():
"""Update the install.json configuration file if exists."""
if not os.path.isfile('install.json'):
return
with open('install.json', 'r') as f:
install_json = json.load(f)
if install_json.get('programMain'):
install_json['programMain'] = 'run'
# update features
install_json['features'] = ['aotExecutionEnabled', 'appBuilderCompliant', 'secureParams']
with open('install.json', 'w') as f:
json.dump(install_json, f, indent=2, sort_keys=True) |
def topDownCompute(self, encoded):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute]
"""
if self.minval is None or self.maxval is None:
return [EncoderResult(value=0, scalar=0,
encoding=numpy.zeros(self.n))]
return super(AdaptiveScalarEncoder, self).topDownCompute(encoded) | [overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute] | Below is the the instruction that describes the task:
### Input:
[overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute]
### Response:
def topDownCompute(self, encoded):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute]
"""
if self.minval is None or self.maxval is None:
return [EncoderResult(value=0, scalar=0,
encoding=numpy.zeros(self.n))]
return super(AdaptiveScalarEncoder, self).topDownCompute(encoded) |
def create_shift(self, params={}):
"""
Creates a shift
http://dev.wheniwork.com/#create/update-shift
"""
url = "/2/shifts/"
body = params
data = self._post_resource(url, body)
shift = self.shift_from_json(data["shift"])
return shift | Creates a shift
http://dev.wheniwork.com/#create/update-shift | Below is the instruction that describes the task:
### Input:
Creates a shift
http://dev.wheniwork.com/#create/update-shift
### Response:
def create_shift(self, params={}):
"""
Creates a shift
http://dev.wheniwork.com/#create/update-shift
"""
url = "/2/shifts/"
body = params
data = self._post_resource(url, body)
shift = self.shift_from_json(data["shift"])
return shift |
def get_label(self, code):
        """Returns string label for given code string
        Inverse of get_code
        Parameters
        ----------
        code: String
        \tCode string, field 1 of style tuple
        """
        # linear scan over the (label, code) style tuples
        for style in self.styles:
            if style[1] == code:
                return style[0]
        # no style carries this code -> report it as invalid
        msg = _("Code {code} is invalid.").format(code=code)
        raise ValueError(msg) | Returns string label for given code string
Inverse of get_code
Parameters
----------
code: String
\tCode string, field 1 of style tuple | Below is the instruction that describes the task:
### Input:
Returns string label for given code string
Inverse of get_code
Parameters
----------
code: String
\tCode string, field 1 of style tuple
### Response:
def get_label(self, code):
"""Returns string label for given code string
Inverse of get_code
Parameters
----------
code: String
\tCode string, field 1 of style tuple
"""
for style in self.styles:
if style[1] == code:
return style[0]
msg = _("Code {code} is invalid.").format(code=code)
raise ValueError(msg) |
def generate_twofactor_code_for_time(shared_secret, timestamp):
"""Generate Steam 2FA code for timestamp
:param shared_secret: authenticator shared secret
:type shared_secret: bytes
:param timestamp: timestamp to use, if left out uses current time
:type timestamp: int
:return: steam two factor code
:rtype: str
"""
hmac = hmac_sha1(bytes(shared_secret),
struct.pack('>Q', int(timestamp)//30)) # this will NOT stop working in 2038
start = ord(hmac[19:20]) & 0xF
codeint = struct.unpack('>I', hmac[start:start+4])[0] & 0x7fffffff
charset = '23456789BCDFGHJKMNPQRTVWXY'
code = ''
for _ in range(5):
codeint, i = divmod(codeint, len(charset))
code += charset[i]
return code | Generate Steam 2FA code for timestamp
:param shared_secret: authenticator shared secret
:type shared_secret: bytes
:param timestamp: timestamp to use, if left out uses current time
:type timestamp: int
:return: steam two factor code
:rtype: str | Below is the instruction that describes the task:
### Input:
Generate Steam 2FA code for timestamp
:param shared_secret: authenticator shared secret
:type shared_secret: bytes
:param timestamp: timestamp to use, if left out uses current time
:type timestamp: int
:return: steam two factor code
:rtype: str
### Response:
def generate_twofactor_code_for_time(shared_secret, timestamp):
"""Generate Steam 2FA code for timestamp
:param shared_secret: authenticator shared secret
:type shared_secret: bytes
:param timestamp: timestamp to use, if left out uses current time
:type timestamp: int
:return: steam two factor code
:rtype: str
"""
hmac = hmac_sha1(bytes(shared_secret),
struct.pack('>Q', int(timestamp)//30)) # this will NOT stop working in 2038
start = ord(hmac[19:20]) & 0xF
codeint = struct.unpack('>I', hmac[start:start+4])[0] & 0x7fffffff
charset = '23456789BCDFGHJKMNPQRTVWXY'
code = ''
for _ in range(5):
codeint, i = divmod(codeint, len(charset))
code += charset[i]
return code |
def downloadMARCOAI(doc_id, base):
    """
    Download MARC OAI document with given `doc_id` from given (logical) `base`.
    Funny part is, that some documents can be obtained only with this function
    in their full text.
    Args:
        doc_id (str): You will get this from :func:`getDocumentIDs`.
        base (str, optional): Base from which you want to download Aleph
                              document.
                              This seems to be duplicated with
                              :func:`searchInAleph` parameters, but it's just
                              something Aleph's X-Services wants, so ..
    Returns:
        str: MARC XML Unicode string.
    Raises:
        InvalidAlephBaseException
        DocumentNotFoundException
    """
    downer = Downloader()
    data = downer.download(
        ALEPH_URL + Template(OAI_DOC_URL_TEMPLATE).substitute(
            DOC_ID=doc_id,
            BASE=base
        )
    )
    # Aleph reports failures in-band as an <error> element in the response body,
    # so parse the payload before trusting it
    dom = dhtmlparser.parseString(data)
    # check for errors
    error = dom.find("error")
    if len(error) <= 0: # no errors
        return data
    if "Error reading document" in error[0].getContent():
        raise DocumentNotFoundException(
            str(error[0].getContent())
        )
    else:
        # any other in-band error is attributed to a bad/unknown base
        raise InvalidAlephBaseException(
            error[0].getContent() + "\n" +
            "The base you are trying to access probably doesn't exist."
        ) | Download MARC OAI document with given `doc_id` from given (logical) `base`.
Funny part is, that some documents can be obtained only with this function
in their full text.
Args:
doc_id (str): You will get this from :func:`getDocumentIDs`.
base (str, optional): Base from which you want to download Aleph
document.
This seems to be duplicite with
:func:`searchInAleph` parameters, but it's just
something Aleph's X-Services wants, so ..
Returns:
str: MARC XML Unicode string.
Raises:
InvalidAlephBaseException
        DocumentNotFoundException | Below is the instruction that describes the task:
### Input:
Download MARC OAI document with given `doc_id` from given (logical) `base`.
Funny part is, that some documents can be obtained only with this function
in their full text.
Args:
doc_id (str): You will get this from :func:`getDocumentIDs`.
base (str, optional): Base from which you want to download Aleph
document.
This seems to be duplicite with
:func:`searchInAleph` parameters, but it's just
something Aleph's X-Services wants, so ..
Returns:
str: MARC XML Unicode string.
Raises:
InvalidAlephBaseException
DocumentNotFoundException
### Response:
def downloadMARCOAI(doc_id, base):
"""
Download MARC OAI document with given `doc_id` from given (logical) `base`.
Funny part is, that some documents can be obtained only with this function
in their full text.
Args:
doc_id (str): You will get this from :func:`getDocumentIDs`.
base (str, optional): Base from which you want to download Aleph
document.
This seems to be duplicite with
:func:`searchInAleph` parameters, but it's just
something Aleph's X-Services wants, so ..
Returns:
str: MARC XML Unicode string.
Raises:
InvalidAlephBaseException
DocumentNotFoundException
"""
downer = Downloader()
data = downer.download(
ALEPH_URL + Template(OAI_DOC_URL_TEMPLATE).substitute(
DOC_ID=doc_id,
BASE=base
)
)
dom = dhtmlparser.parseString(data)
# check for errors
error = dom.find("error")
if len(error) <= 0: # no errors
return data
if "Error reading document" in error[0].getContent():
raise DocumentNotFoundException(
str(error[0].getContent())
)
else:
raise InvalidAlephBaseException(
error[0].getContent() + "\n" +
"The base you are trying to access probably doesn't exist."
) |
def Start(self,
          file_size=0,
          maximum_pending_files=1000,
          use_external_stores=False):
    """Initialize our state.

    :param file_size: byte-size value stored on the flow state for later
        use by the fetch logic (semantics enforced elsewhere -- TODO confirm).
    :param maximum_pending_files: cap on the number of files downloaded
        concurrently.
    :param use_external_stores: stored flag; presumably controls whether
        external file stores are also consulted/written -- verify in callers.
    """
    super(MultiGetFileLogic, self).Start()
    self.state.files_hashed = 0
    self.state.use_external_stores = use_external_stores
    self.state.file_size = file_size
    self.state.files_to_fetch = 0
    self.state.files_fetched = 0
    self.state.files_skipped = 0
    # Counter to batch up hash checking in the filestore
    self.state.files_hashed_since_check = 0
    # A dict of file trackers which are waiting to be checked by the file
    # store. Keys are vfs urns and values are FileTrack instances. Values are
    # copied to pending_files for download if not present in FileStore.
    self.state.pending_hashes = {}
    # A dict of file trackers currently being fetched. Keys are vfs urns and
    # values are FileTracker instances.
    self.state.pending_files = {}
    # The maximum number of files we are allowed to download concurrently.
    self.state.maximum_pending_files = maximum_pending_files
    # As pathspecs are added to the flow they are appended to this array. We
    # then simply pass their index in this array as a surrogate for the full
    # pathspec. This allows us to use integers to track pathspecs in dicts etc.
    self.state.indexed_pathspecs = []
    self.state.request_data_list = []
    # The index of the next pathspec to start. Pathspecs are added to
    # indexed_pathspecs and wait there until there are free trackers for
    # them. When the number of pending_files falls below the
    # "maximum_pending_files" count, we increment this index and start
    # downloading another pathspec.
    self.state.next_pathspec_to_start = 0
    # Number of blob hashes we have received but not yet scheduled for download.
    self.state.blob_hashes_pending = 0 | Initialize our state. | Below is the the instruction that describes the task:
### Input:
Initialize our state.
### Response:
def Start(self,
file_size=0,
maximum_pending_files=1000,
use_external_stores=False):
"""Initialize our state."""
super(MultiGetFileLogic, self).Start()
self.state.files_hashed = 0
self.state.use_external_stores = use_external_stores
self.state.file_size = file_size
self.state.files_to_fetch = 0
self.state.files_fetched = 0
self.state.files_skipped = 0
# Counter to batch up hash checking in the filestore
self.state.files_hashed_since_check = 0
# A dict of file trackers which are waiting to be checked by the file
# store. Keys are vfs urns and values are FileTrack instances. Values are
# copied to pending_files for download if not present in FileStore.
self.state.pending_hashes = {}
# A dict of file trackers currently being fetched. Keys are vfs urns and
# values are FileTracker instances.
self.state.pending_files = {}
# The maximum number of files we are allowed to download concurrently.
self.state.maximum_pending_files = maximum_pending_files
# As pathspecs are added to the flow they are appended to this array. We
# then simply pass their index in this array as a surrogate for the full
# pathspec. This allows us to use integers to track pathspecs in dicts etc.
self.state.indexed_pathspecs = []
self.state.request_data_list = []
# The index of the next pathspec to start. Pathspecs are added to
# indexed_pathspecs and wait there until there are free trackers for
# them. When the number of pending_files falls below the
# "maximum_pending_files" count] = we increment this index and start of
# downloading another pathspec.
self.state.next_pathspec_to_start = 0
# Number of blob hashes we have received but not yet scheduled for download.
self.state.blob_hashes_pending = 0 |
def get_file_from_url(job, any_url, encryption_key=None, per_file_encryption=True,
                      write_to_jobstore=True):
    """
    Download a supplied URL that points to a file on an http, https or ftp server. If the file is
    found to be an https s3 link then the file is downloaded using `get_file_from_s3`. The file is
    downloaded and written to the jobstore if requested.
    Encryption arguments are for passing to `get_file_from_s3` if required.
    :param str any_url: URL for the file
    :param str encryption_key: Path to the master key
    :param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
    :param bool write_to_jobstore: Should the file be written to the job store?
    :return: Path to the downloaded file or fsID (if write_to_jobstore was True)
    :rtype: str|toil.fileStore.FileID
    """
    work_dir = job.fileStore.getLocalTempDir()
    # random temp filename inside this job's scratch dir
    filename = '/'.join([work_dir, str(uuid.uuid4())])
    url = any_url
    parsed_url = urlparse(any_url)
    # Python 2 urllib2 API (this module predates urllib.request)
    try:
        response = urllib2.urlopen(url)
    except urllib2.HTTPError:
        # An HTTP error on an s3.*/S3.* host presumably means the object needs
        # authenticated (possibly encrypted) access -- retry via the S3 helper.
        if parsed_url.netloc.startswith(('s3', 'S3')):
            job.fileStore.logToMaster("Detected https link is for an encrypted s3 file.")
            return get_file_from_s3(job, any_url, encryption_key=encryption_key,
                                    per_file_encryption=per_file_encryption,
                                    write_to_jobstore=write_to_jobstore)
        else:
            raise
    else:
        # NOTE(review): bytes are written in text mode ('w') -- fine on py2,
        # would need 'wb' on py3
        with open(filename, 'w') as f:
            f.write(response.read())
    if write_to_jobstore:
        filename = job.fileStore.writeGlobalFile(filename)
    return filename | Download a supplied URL that points to a file on an http, https or ftp server. If the file is
found to be an https s3 link then the file is downloaded using `get_file_from_s3`. The file is
downloaded and written to the jobstore if requested.
Encryption arguments are for passing to `get_file_from_s3` if required.
:param str any_url: URL for the file
:param str encryption_key: Path to the master key
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: str|toil.fileStore.FileID | Below is the the instruction that describes the task:
### Input:
Download a supplied URL that points to a file on an http, https or ftp server. If the file is
found to be an https s3 link then the file is downloaded using `get_file_from_s3`. The file is
downloaded and written to the jobstore if requested.
Encryption arguments are for passing to `get_file_from_s3` if required.
:param str any_url: URL for the file
:param str encryption_key: Path to the master key
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: str|toil.fileStore.FileID
### Response:
def get_file_from_url(job, any_url, encryption_key=None, per_file_encryption=True,
write_to_jobstore=True):
"""
Download a supplied URL that points to a file on an http, https or ftp server. If the file is
found to be an https s3 link then the file is downloaded using `get_file_from_s3`. The file is
downloaded and written to the jobstore if requested.
Encryption arguments are for passing to `get_file_from_s3` if required.
:param str any_url: URL for the file
:param str encryption_key: Path to the master key
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: str|toil.fileStore.FileID
"""
work_dir = job.fileStore.getLocalTempDir()
filename = '/'.join([work_dir, str(uuid.uuid4())])
url = any_url
parsed_url = urlparse(any_url)
try:
response = urllib2.urlopen(url)
except urllib2.HTTPError:
if parsed_url.netloc.startswith(('s3', 'S3')):
job.fileStore.logToMaster("Detected https link is for an encrypted s3 file.")
return get_file_from_s3(job, any_url, encryption_key=encryption_key,
per_file_encryption=per_file_encryption,
write_to_jobstore=write_to_jobstore)
else:
raise
else:
with open(filename, 'w') as f:
f.write(response.read())
if write_to_jobstore:
filename = job.fileStore.writeGlobalFile(filename)
return filename |
def remote(self, remote_base=None, username=None, password=None):
        """
        Configures remote access
        Parameters
        ----------
        remote_base : str
            base URL path for remote repository
        username : str
            user name for remote repository
        password : str
            password for remote repository
        """
        if remote_base is not None:
            self.remote_base = remote_base
        self._remote = True
        self.set_auth(username=username, password=password)
        if self.auth.ready():
            # install a process-global urllib opener that sends HTTP Basic
            # auth for requests against self.remote_base
            passman = HTTPPasswordMgrWithDefaultRealm()
            passman.add_password(None, self.remote_base, self.auth.username, self.auth.password)
            authhandler = HTTPBasicAuthHandler(passman)
            opener = build_opener(authhandler)
            install_opener(opener) | Configures remote access
Parameters
----------
remote_base : str
base URL path for remote repository
username : str
user name for remote repository
password : str
password for local repository | Below is the the instruction that describes the task:
### Input:
Configures remote access
Parameters
----------
remote_base : str
base URL path for remote repository
username : str
user name for remote repository
password : str
password for local repository
### Response:
def remote(self, remote_base=None, username=None, password=None):
"""
Configures remote access
Parameters
----------
remote_base : str
base URL path for remote repository
username : str
user name for remote repository
password : str
password for local repository
"""
if remote_base is not None:
self.remote_base = remote_base
self._remote = True
self.set_auth(username=username, password=password)
if self.auth.ready():
passman = HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, self.remote_base, self.auth.username, self.auth.password)
authhandler = HTTPBasicAuthHandler(passman)
opener = build_opener(authhandler)
install_opener(opener) |
def _cache_update_needed(self, courseid):
"""
:param courseid: the (valid) course id of the course
:raise InvalidNameException, CourseNotFoundException
:return: True if an update of the cache is needed, False else
"""
if courseid not in self._cache:
return True
try:
descriptor_name = self._get_course_descriptor_path(courseid)
last_update = {descriptor_name: self._filesystem.get_last_modification_time(descriptor_name)}
translations_fs = self._filesystem.from_subfolder("$i18n")
if translations_fs.exists():
for f in translations_fs.list(folders=False, files=True, recursive=False):
lang = f[0:len(f) - 3]
if translations_fs.exists(lang + ".mo"):
last_update["$i18n/" + lang + ".mo"] = translations_fs.get_last_modification_time(lang + ".mo")
except:
raise CourseNotFoundException()
last_modif = self._cache[courseid][1]
for filename, mftime in last_update.items():
if filename not in last_modif or last_modif[filename] < mftime:
return True
return False | :param courseid: the (valid) course id of the course
:raise InvalidNameException, CourseNotFoundException
:return: True if an update of the cache is needed, False else | Below is the the instruction that describes the task:
### Input:
:param courseid: the (valid) course id of the course
:raise InvalidNameException, CourseNotFoundException
:return: True if an update of the cache is needed, False else
### Response:
def _cache_update_needed(self, courseid):
"""
:param courseid: the (valid) course id of the course
:raise InvalidNameException, CourseNotFoundException
:return: True if an update of the cache is needed, False else
"""
if courseid not in self._cache:
return True
try:
descriptor_name = self._get_course_descriptor_path(courseid)
last_update = {descriptor_name: self._filesystem.get_last_modification_time(descriptor_name)}
translations_fs = self._filesystem.from_subfolder("$i18n")
if translations_fs.exists():
for f in translations_fs.list(folders=False, files=True, recursive=False):
lang = f[0:len(f) - 3]
if translations_fs.exists(lang + ".mo"):
last_update["$i18n/" + lang + ".mo"] = translations_fs.get_last_modification_time(lang + ".mo")
except:
raise CourseNotFoundException()
last_modif = self._cache[courseid][1]
for filename, mftime in last_update.items():
if filename not in last_modif or last_modif[filename] < mftime:
return True
return False |
def adaptStandardLogging(loggerName, logCategory, targetModule):
"""
Make a logger from the standard library log through the Flumotion logging
system.
@param loggerName: The standard logger to adapt, e.g. 'library.module'
@type loggerName: str
@param logCategory: The Flumotion log category to use when reporting output
from the standard logger, e.g. 'librarymodule'
@type logCategory: str
@param targetModule: The name of the module that the logging should look
like it's coming from. Use this if you don't want to
see the file names and line numbers of the library
who's logger you are adapting.
@type targetModule: str or None
"""
logger = logging.getLogger(loggerName)
# if there is already a FluHandler, exit
if map(lambda h: isinstance(h, LogHandler), logger.handlers):
return
logger.setLevel(logLevelToStdLevel(getCategoryLevel(logCategory)))
logger.addHandler(LogHandler(logCategory, targetModule)) | Make a logger from the standard library log through the Flumotion logging
system.
@param loggerName: The standard logger to adapt, e.g. 'library.module'
@type loggerName: str
@param logCategory: The Flumotion log category to use when reporting output
from the standard logger, e.g. 'librarymodule'
@type logCategory: str
@param targetModule: The name of the module that the logging should look
like it's coming from. Use this if you don't want to
see the file names and line numbers of the library
who's logger you are adapting.
@type targetModule: str or None | Below is the the instruction that describes the task:
### Input:
Make a logger from the standard library log through the Flumotion logging
system.
@param loggerName: The standard logger to adapt, e.g. 'library.module'
@type loggerName: str
@param logCategory: The Flumotion log category to use when reporting output
from the standard logger, e.g. 'librarymodule'
@type logCategory: str
@param targetModule: The name of the module that the logging should look
like it's coming from. Use this if you don't want to
see the file names and line numbers of the library
who's logger you are adapting.
@type targetModule: str or None
### Response:
def adaptStandardLogging(loggerName, logCategory, targetModule):
"""
Make a logger from the standard library log through the Flumotion logging
system.
@param loggerName: The standard logger to adapt, e.g. 'library.module'
@type loggerName: str
@param logCategory: The Flumotion log category to use when reporting output
from the standard logger, e.g. 'librarymodule'
@type logCategory: str
@param targetModule: The name of the module that the logging should look
like it's coming from. Use this if you don't want to
see the file names and line numbers of the library
who's logger you are adapting.
@type targetModule: str or None
"""
logger = logging.getLogger(loggerName)
# if there is already a FluHandler, exit
if map(lambda h: isinstance(h, LogHandler), logger.handlers):
return
logger.setLevel(logLevelToStdLevel(getCategoryLevel(logCategory)))
logger.addHandler(LogHandler(logCategory, targetModule)) |
def get_media_detail_output_interface_interface_identifier_gbic_gbc_vendor_name(self, **kwargs):
        """Auto Generated Code

        Build the XML request tree for a get-media-detail RPC down to the
        gbic/gbc vendor-name leaf and hand it to the callback.
        Required kwargs: ``interface_type``, ``interface_name``,
        ``vendor_name``.  Optional: ``callback`` (defaults to
        ``self._callback``).
        """
        config = ET.Element("config")
        get_media_detail = ET.Element("get_media_detail")
        config = get_media_detail
        output = ET.SubElement(get_media_detail, "output")
        interface = ET.SubElement(output, "interface")
        interface_type_key = ET.SubElement(interface, "interface-type")
        interface_type_key.text = kwargs.pop('interface_type')
        interface_name_key = ET.SubElement(interface, "interface-name")
        interface_name_key.text = kwargs.pop('interface_name')
        interface_identifier = ET.SubElement(interface, "interface-identifier")
        gbic = ET.SubElement(interface_identifier, "gbic")
        gbc = ET.SubElement(gbic, "gbc")
        vendor_name = ET.SubElement(gbc, "vendor-name")
        vendor_name.text = kwargs.pop('vendor_name')
        callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_media_detail_output_interface_interface_identifier_gbic_gbc_vendor_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_media_detail = ET.Element("get_media_detail")
config = get_media_detail
output = ET.SubElement(get_media_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
interface_identifier = ET.SubElement(interface, "interface-identifier")
gbic = ET.SubElement(interface_identifier, "gbic")
gbc = ET.SubElement(gbic, "gbc")
vendor_name = ET.SubElement(gbc, "vendor-name")
vendor_name.text = kwargs.pop('vendor_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _set_port_profile_domain(self, v, load=False):
    """
    Setter method for port_profile_domain, mapped from YANG variable /port_profile_domain (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_port_profile_domain is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_port_profile_domain() directly.
    """
    # generated (pyangbind-style) boilerplate: unwrap a previously-typed value
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # coerce v into the generated YANG list type; an incompatible value
    # surfaces as TypeError/ValueError and is re-raised with YANG context
    try:
      t = YANGDynClass(v,base=YANGListType("port_profile_domain_name",port_profile_domain.port_profile_domain, yang_name="port-profile-domain", rest_name="port-profile-domain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-profile-domain-name', extensions={u'tailf-common': {u'info': u'Define a port-profile-domain', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'78', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'port-profile-domain-callpoint'}}), is_container='list', yang_name="port-profile-domain", rest_name="port-profile-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define a port-profile-domain', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'78', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'port-profile-domain-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """port_profile_domain must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("port_profile_domain_name",port_profile_domain.port_profile_domain, yang_name="port-profile-domain", rest_name="port-profile-domain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-profile-domain-name', extensions={u'tailf-common': {u'info': u'Define a port-profile-domain', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'78', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'port-profile-domain-callpoint'}}), is_container='list', yang_name="port-profile-domain", rest_name="port-profile-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define a port-profile-domain', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'78', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'port-profile-domain-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='list', is_config=True)""",
      })
    # store the validated container and notify the backend, if it hooks _set
    self.__port_profile_domain = t
    if hasattr(self, '_set'):
      self._set() | Setter method for port_profile_domain, mapped from YANG variable /port_profile_domain (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_profile_domain is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_profile_domain() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for port_profile_domain, mapped from YANG variable /port_profile_domain (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_profile_domain is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_profile_domain() directly.
### Response:
def _set_port_profile_domain(self, v, load=False):
"""
Setter method for port_profile_domain, mapped from YANG variable /port_profile_domain (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_profile_domain is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_profile_domain() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("port_profile_domain_name",port_profile_domain.port_profile_domain, yang_name="port-profile-domain", rest_name="port-profile-domain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-profile-domain-name', extensions={u'tailf-common': {u'info': u'Define a port-profile-domain', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'78', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'port-profile-domain-callpoint'}}), is_container='list', yang_name="port-profile-domain", rest_name="port-profile-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define a port-profile-domain', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'78', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'port-profile-domain-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_profile_domain must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("port_profile_domain_name",port_profile_domain.port_profile_domain, yang_name="port-profile-domain", rest_name="port-profile-domain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-profile-domain-name', extensions={u'tailf-common': {u'info': u'Define a port-profile-domain', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'78', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'port-profile-domain-callpoint'}}), is_container='list', yang_name="port-profile-domain", rest_name="port-profile-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define a port-profile-domain', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'78', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'port-profile-domain-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='list', is_config=True)""",
})
self.__port_profile_domain = t
if hasattr(self, '_set'):
self._set() |
def dump_nt_sorted(g: Graph) -> List[str]:
    """
    Dump graph g in sorted N-Triples ('nt') format
    :param g: graph to dump
    :return: sorted list of ascii-decoded N-Triples lines (blank lines dropped)
    """
    return [l.decode('ascii') for l in sorted(g.serialize(format='nt').splitlines()) if l] | Dump graph g in a sorted n3 format
:param g: graph to dump
:return: stringified representation of g | Below is the the instruction that describes the task:
### Input:
Dump graph g in a sorted n3 format
:param g: graph to dump
:return: stringified representation of g
### Response:
def dump_nt_sorted(g: Graph) -> List[str]:
"""
Dump graph g in a sorted n3 format
:param g: graph to dump
:return: stringified representation of g
"""
return [l.decode('ascii') for l in sorted(g.serialize(format='nt').splitlines()) if l] |
def with_name(self, name):
        """Return a new path with the file name changed.

        Raises ValueError when this path has no final name component
        (e.g. a bare drive/root), since there is nothing to replace.
        """
        if not self.name:
            raise ValueError("%r has an empty name" % (self,))
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name]) | Return a new path with the file name changed. | Below is the the instruction that describes the task:
### Input:
Return a new path with the file name changed.
### Response:
def with_name(self, name):
"""Return a new path with the file name changed."""
if not self.name:
raise ValueError("%r has an empty name" % (self,))
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name]) |
def mute(ip):
"""Polyfill for muting the TV."""
tv_url = 'http://{}:6095/controller?action=keyevent&keycode='.format(ip)
count = 0
while count > 30:
count = count + 1
request = requests.get(tv_url + 'volumedown')
if request.status_code != 200:
return False
return True | Polyfill for muting the TV. | Below is the the instruction that describes the task:
### Input:
Polyfill for muting the TV.
### Response:
def mute(ip):
"""Polyfill for muting the TV."""
tv_url = 'http://{}:6095/controller?action=keyevent&keycode='.format(ip)
count = 0
while count > 30:
count = count + 1
request = requests.get(tv_url + 'volumedown')
if request.status_code != 200:
return False
return True |
def _list_or_args(self, keys, args):
"""
Shamelessly copied from redis-py.
"""
# returns a single list combining keys and args
try:
iter(keys)
# a string can be iterated, but indicates
# keys wasn't passed as a list
if isinstance(keys, basestring):
keys = [keys]
except TypeError:
keys = [keys]
if args:
keys.extend(args)
return keys | Shamelessly copied from redis-py. | Below is the the instruction that describes the task:
### Input:
Shamelessly copied from redis-py.
### Response:
def _list_or_args(self, keys, args):
"""
Shamelessly copied from redis-py.
"""
# returns a single list combining keys and args
try:
iter(keys)
# a string can be iterated, but indicates
# keys wasn't passed as a list
if isinstance(keys, basestring):
keys = [keys]
except TypeError:
keys = [keys]
if args:
keys.extend(args)
return keys |
def send_batch(messages, api_key=None, secure=None, test=None, **request_args):
'''Send a batch of messages.
:param messages: Messages to send.
:type message: A list of `dict` or :class:`Message`
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API.
Defaults to `True`
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`BatchSendResponse`
'''
return _default_pyst_batch_sender.send(messages=messages, api_key=api_key,
secure=secure, test=test,
**request_args) | Send a batch of messages.
:param messages: Messages to send.
:type message: A list of `dict` or :class:`Message`
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API.
Defaults to `True`
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`BatchSendResponse` | Below is the the instruction that describes the task:
### Input:
Send a batch of messages.
:param messages: Messages to send.
:type message: A list of `dict` or :class:`Message`
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API.
Defaults to `True`
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`BatchSendResponse`
### Response:
def send_batch(messages, api_key=None, secure=None, test=None, **request_args):
'''Send a batch of messages.
:param messages: Messages to send.
:type message: A list of `dict` or :class:`Message`
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API.
Defaults to `True`
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`BatchSendResponse`
'''
return _default_pyst_batch_sender.send(messages=messages, api_key=api_key,
secure=secure, test=test,
**request_args) |
def create_upload_url(success_path,
max_bytes_per_blob=None,
max_bytes_total=None,
**options):
"""Create upload URL for POST form.
Args:
success_path: Path within application to call when POST is successful
and upload is complete.
max_bytes_per_blob: The maximum size in bytes that any one blob in the
upload can be or None for no maximum size.
max_bytes_total: The maximum size in bytes that the aggregate sizes of all
of the blobs in the upload can be or None for no maximum size.
**options: Options for create_rpc().
Returns:
The upload URL.
Raises:
TypeError: If max_bytes_per_blob or max_bytes_total are not integral types.
ValueError: If max_bytes_per_blob or max_bytes_total are not
positive values.
"""
fut = create_upload_url_async(success_path,
max_bytes_per_blob=max_bytes_per_blob,
max_bytes_total=max_bytes_total,
**options)
return fut.get_result() | Create upload URL for POST form.
Args:
success_path: Path within application to call when POST is successful
and upload is complete.
max_bytes_per_blob: The maximum size in bytes that any one blob in the
upload can be or None for no maximum size.
max_bytes_total: The maximum size in bytes that the aggregate sizes of all
of the blobs in the upload can be or None for no maximum size.
**options: Options for create_rpc().
Returns:
The upload URL.
Raises:
TypeError: If max_bytes_per_blob or max_bytes_total are not integral types.
ValueError: If max_bytes_per_blob or max_bytes_total are not
positive values. | Below is the the instruction that describes the task:
### Input:
Create upload URL for POST form.
Args:
success_path: Path within application to call when POST is successful
and upload is complete.
max_bytes_per_blob: The maximum size in bytes that any one blob in the
upload can be or None for no maximum size.
max_bytes_total: The maximum size in bytes that the aggregate sizes of all
of the blobs in the upload can be or None for no maximum size.
**options: Options for create_rpc().
Returns:
The upload URL.
Raises:
TypeError: If max_bytes_per_blob or max_bytes_total are not integral types.
ValueError: If max_bytes_per_blob or max_bytes_total are not
positive values.
### Response:
def create_upload_url(success_path,
max_bytes_per_blob=None,
max_bytes_total=None,
**options):
"""Create upload URL for POST form.
Args:
success_path: Path within application to call when POST is successful
and upload is complete.
max_bytes_per_blob: The maximum size in bytes that any one blob in the
upload can be or None for no maximum size.
max_bytes_total: The maximum size in bytes that the aggregate sizes of all
of the blobs in the upload can be or None for no maximum size.
**options: Options for create_rpc().
Returns:
The upload URL.
Raises:
TypeError: If max_bytes_per_blob or max_bytes_total are not integral types.
ValueError: If max_bytes_per_blob or max_bytes_total are not
positive values.
"""
fut = create_upload_url_async(success_path,
max_bytes_per_blob=max_bytes_per_blob,
max_bytes_total=max_bytes_total,
**options)
return fut.get_result() |
def two_phase_dP_gravitational(angle, z, alpha_i, rho_li, rho_gi,
alpha_o=None, rho_lo=None, rho_go=None, g=g):
r'''This function handles calculation of two-phase liquid-gas pressure drop
due to gravitation for flow inside channels. This is a discrete
calculation for a segment with a known difference in elevation (and ideally
known inlet and outlet pressures so density dependence can be included).
.. math::
- \Delta P_{grav} = g \sin \theta z \left\{\frac{ [\alpha_o\rho_{g,o}
+ (1-\alpha_o)\rho_{l,o}] + [\alpha_i\rho_{g,i} + (1-\alpha_i)\rho_{l,i}]}
{2}\right\}
Parameters
----------
angle : float
The angle of the pipe with respect to the horizontal, [degrees]
z : float
The total length of the pipe, [m]
alpha_i : float
Void fraction at inlet (area of gas / total area of channel), [-]
rho_li : float
Liquid phase density at inlet, [kg/m^3]
rho_gi : float
Gas phase density at inlet, [kg/m^3]
alpha_o : float, optional
Void fraction at outlet (area of gas / total area of channel), [-]
rho_lo : float, optional
Liquid phase density at outlet, [kg/m^3]
rho_go : float, optional
Gas phase density at outlet, [kg/m^3]
g : float, optional
Acceleration due to gravity, [m/s^2]
Returns
-------
dP : float
Gravitational component of pressure drop for two-phase flow, [Pa]
Notes
-----
The use of different gas and liquid phase densities and void fraction
at the inlet and outlet is optional; the outlet densities and void fraction
will be assumed to be those of the inlet if they are not specified. This
does not add much accuracy.
There is a continuous variant of this method which can be integrated over,
at the expense of a speed. The differential form of this is as follows
([1]_, [2]_):
.. math::
-\left(\frac{dP}{dz} \right)_{grav} = [\alpha\rho_g + (1-\alpha)
\rho_l]g \sin \theta
Examples
--------
Example calculation, page 13-2 from [3]_:
>>> two_phase_dP_gravitational(angle=90, z=2, alpha_i=0.9685, rho_li=1518.,
... rho_gi=2.6)
987.237416829999
The same calculation, but using average inlet and outlet conditions:
>>> two_phase_dP_gravitational(angle=90, z=2, alpha_i=0.9685, rho_li=1518.,
... rho_gi=2.6, alpha_o=0.968, rho_lo=1517.9, rho_go=2.59)
994.5416058829999
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035.
.. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
(2004). http://www.wlv.com/heat-transfer-databook/
'''
if rho_lo is None:
rho_lo = rho_li
if rho_go is None:
rho_go = rho_gi
if alpha_o is None:
alpha_o = alpha_i
angle = radians(angle)
in_term = alpha_i*rho_gi + (1. - alpha_i)*rho_li
out_term = alpha_o*rho_go + (1. - alpha_o)*rho_lo
return g*z*sin(angle)*(out_term + in_term)/2. | r'''This function handles calculation of two-phase liquid-gas pressure drop
due to gravitation for flow inside channels. This is a discrete
calculation for a segment with a known difference in elevation (and ideally
known inlet and outlet pressures so density dependence can be included).
.. math::
- \Delta P_{grav} = g \sin \theta z \left\{\frac{ [\alpha_o\rho_{g,o}
+ (1-\alpha_o)\rho_{l,o}] + [\alpha_i\rho_{g,i} + (1-\alpha_i)\rho_{l,i}]}
{2}\right\}
Parameters
----------
angle : float
The angle of the pipe with respect to the horizontal, [degrees]
z : float
The total length of the pipe, [m]
alpha_i : float
Void fraction at inlet (area of gas / total area of channel), [-]
rho_li : float
Liquid phase density at inlet, [kg/m^3]
rho_gi : float
Gas phase density at inlet, [kg/m^3]
alpha_o : float, optional
Void fraction at outlet (area of gas / total area of channel), [-]
rho_lo : float, optional
Liquid phase density at outlet, [kg/m^3]
rho_go : float, optional
Gas phase density at outlet, [kg/m^3]
g : float, optional
Acceleration due to gravity, [m/s^2]
Returns
-------
dP : float
Gravitational component of pressure drop for two-phase flow, [Pa]
Notes
-----
The use of different gas and liquid phase densities and void fraction
at the inlet and outlet is optional; the outlet densities and void fraction
will be assumed to be those of the inlet if they are not specified. This
does not add much accuracy.
There is a continuous variant of this method which can be integrated over,
at the expense of a speed. The differential form of this is as follows
([1]_, [2]_):
.. math::
-\left(\frac{dP}{dz} \right)_{grav} = [\alpha\rho_g + (1-\alpha)
\rho_l]g \sin \theta
Examples
--------
Example calculation, page 13-2 from [3]_:
>>> two_phase_dP_gravitational(angle=90, z=2, alpha_i=0.9685, rho_li=1518.,
... rho_gi=2.6)
987.237416829999
The same calculation, but using average inlet and outlet conditions:
>>> two_phase_dP_gravitational(angle=90, z=2, alpha_i=0.9685, rho_li=1518.,
... rho_gi=2.6, alpha_o=0.968, rho_lo=1517.9, rho_go=2.59)
994.5416058829999
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035.
.. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
(2004). http://www.wlv.com/heat-transfer-databook/ | Below is the the instruction that describes the task:
### Input:
r'''This function handles calculation of two-phase liquid-gas pressure drop
due to gravitation for flow inside channels. This is a discrete
calculation for a segment with a known difference in elevation (and ideally
known inlet and outlet pressures so density dependence can be included).
.. math::
- \Delta P_{grav} = g \sin \theta z \left\{\frac{ [\alpha_o\rho_{g,o}
+ (1-\alpha_o)\rho_{l,o}] + [\alpha_i\rho_{g,i} + (1-\alpha_i)\rho_{l,i}]}
{2}\right\}
Parameters
----------
angle : float
The angle of the pipe with respect to the horizontal, [degrees]
z : float
The total length of the pipe, [m]
alpha_i : float
Void fraction at inlet (area of gas / total area of channel), [-]
rho_li : float
Liquid phase density at inlet, [kg/m^3]
rho_gi : float
Gas phase density at inlet, [kg/m^3]
alpha_o : float, optional
Void fraction at outlet (area of gas / total area of channel), [-]
rho_lo : float, optional
Liquid phase density at outlet, [kg/m^3]
rho_go : float, optional
Gas phase density at outlet, [kg/m^3]
g : float, optional
Acceleration due to gravity, [m/s^2]
Returns
-------
dP : float
Gravitational component of pressure drop for two-phase flow, [Pa]
Notes
-----
The use of different gas and liquid phase densities and void fraction
at the inlet and outlet is optional; the outlet densities and void fraction
will be assumed to be those of the inlet if they are not specified. This
does not add much accuracy.
There is a continuous variant of this method which can be integrated over,
at the expense of a speed. The differential form of this is as follows
([1]_, [2]_):
.. math::
-\left(\frac{dP}{dz} \right)_{grav} = [\alpha\rho_g + (1-\alpha)
\rho_l]g \sin \theta
Examples
--------
Example calculation, page 13-2 from [3]_:
>>> two_phase_dP_gravitational(angle=90, z=2, alpha_i=0.9685, rho_li=1518.,
... rho_gi=2.6)
987.237416829999
The same calculation, but using average inlet and outlet conditions:
>>> two_phase_dP_gravitational(angle=90, z=2, alpha_i=0.9685, rho_li=1518.,
... rho_gi=2.6, alpha_o=0.968, rho_lo=1517.9, rho_go=2.59)
994.5416058829999
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035.
.. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
(2004). http://www.wlv.com/heat-transfer-databook/
### Response:
def two_phase_dP_gravitational(angle, z, alpha_i, rho_li, rho_gi,
alpha_o=None, rho_lo=None, rho_go=None, g=g):
r'''This function handles calculation of two-phase liquid-gas pressure drop
due to gravitation for flow inside channels. This is a discrete
calculation for a segment with a known difference in elevation (and ideally
known inlet and outlet pressures so density dependence can be included).
.. math::
- \Delta P_{grav} = g \sin \theta z \left\{\frac{ [\alpha_o\rho_{g,o}
+ (1-\alpha_o)\rho_{l,o}] + [\alpha_i\rho_{g,i} + (1-\alpha_i)\rho_{l,i}]}
{2}\right\}
Parameters
----------
angle : float
The angle of the pipe with respect to the horizontal, [degrees]
z : float
The total length of the pipe, [m]
alpha_i : float
Void fraction at inlet (area of gas / total area of channel), [-]
rho_li : float
Liquid phase density at inlet, [kg/m^3]
rho_gi : float
Gas phase density at inlet, [kg/m^3]
alpha_o : float, optional
Void fraction at outlet (area of gas / total area of channel), [-]
rho_lo : float, optional
Liquid phase density at outlet, [kg/m^3]
rho_go : float, optional
Gas phase density at outlet, [kg/m^3]
g : float, optional
Acceleration due to gravity, [m/s^2]
Returns
-------
dP : float
Gravitational component of pressure drop for two-phase flow, [Pa]
Notes
-----
The use of different gas and liquid phase densities and void fraction
at the inlet and outlet is optional; the outlet densities and void fraction
will be assumed to be those of the inlet if they are not specified. This
does not add much accuracy.
There is a continuous variant of this method which can be integrated over,
at the expense of a speed. The differential form of this is as follows
([1]_, [2]_):
.. math::
-\left(\frac{dP}{dz} \right)_{grav} = [\alpha\rho_g + (1-\alpha)
\rho_l]g \sin \theta
Examples
--------
Example calculation, page 13-2 from [3]_:
>>> two_phase_dP_gravitational(angle=90, z=2, alpha_i=0.9685, rho_li=1518.,
... rho_gi=2.6)
987.237416829999
The same calculation, but using average inlet and outlet conditions:
>>> two_phase_dP_gravitational(angle=90, z=2, alpha_i=0.9685, rho_li=1518.,
... rho_gi=2.6, alpha_o=0.968, rho_lo=1517.9, rho_go=2.59)
994.5416058829999
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035.
.. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
(2004). http://www.wlv.com/heat-transfer-databook/
'''
if rho_lo is None:
rho_lo = rho_li
if rho_go is None:
rho_go = rho_gi
if alpha_o is None:
alpha_o = alpha_i
angle = radians(angle)
in_term = alpha_i*rho_gi + (1. - alpha_i)*rho_li
out_term = alpha_o*rho_go + (1. - alpha_o)*rho_lo
return g*z*sin(angle)*(out_term + in_term)/2. |
def release(self, resource):
"""release(resource)
Returns a resource to the pool. Most of the time you will want
to use :meth:`transaction`, but if you use :meth:`acquire`,
you must release the acquired resource back to the pool when
finished. Failure to do so could result in deadlock.
:param resource: Resource
"""
with self.releaser:
resource.claimed = False
self.releaser.notify_all() | release(resource)
Returns a resource to the pool. Most of the time you will want
to use :meth:`transaction`, but if you use :meth:`acquire`,
you must release the acquired resource back to the pool when
finished. Failure to do so could result in deadlock.
:param resource: Resource | Below is the the instruction that describes the task:
### Input:
release(resource)
Returns a resource to the pool. Most of the time you will want
to use :meth:`transaction`, but if you use :meth:`acquire`,
you must release the acquired resource back to the pool when
finished. Failure to do so could result in deadlock.
:param resource: Resource
### Response:
def release(self, resource):
"""release(resource)
Returns a resource to the pool. Most of the time you will want
to use :meth:`transaction`, but if you use :meth:`acquire`,
you must release the acquired resource back to the pool when
finished. Failure to do so could result in deadlock.
:param resource: Resource
"""
with self.releaser:
resource.claimed = False
self.releaser.notify_all() |
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor
if name is not None and len(self.columns) + index < 256:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays) | Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2) | Below is the the instruction that describes the task:
### Input:
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
### Response:
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor
if name is not None and len(self.columns) + index < 256:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays) |
def abort(self):
"""
Immediately close the stream, without sending remaining buffers or
performing a proper shutdown.
"""
if self._state == _State.CLOSED:
self._invalid_state("abort() called")
return
self._force_close(None) | Immediately close the stream, without sending remaining buffers or
performing a proper shutdown. | Below is the the instruction that describes the task:
### Input:
Immediately close the stream, without sending remaining buffers or
performing a proper shutdown.
### Response:
def abort(self):
"""
Immediately close the stream, without sending remaining buffers or
performing a proper shutdown.
"""
if self._state == _State.CLOSED:
self._invalid_state("abort() called")
return
self._force_close(None) |
def get_fields(self):
"""
Return all field objects
:rtype: a list of :class:`EncodedField` objects
"""
if self.__cache_all_fields is None:
self.__cache_all_fields = []
for i in self.get_classes():
for j in i.get_fields():
self.__cache_all_fields.append(j)
return self.__cache_all_fields | Return all field objects
:rtype: a list of :class:`EncodedField` objects | Below is the the instruction that describes the task:
### Input:
Return all field objects
:rtype: a list of :class:`EncodedField` objects
### Response:
def get_fields(self):
"""
Return all field objects
:rtype: a list of :class:`EncodedField` objects
"""
if self.__cache_all_fields is None:
self.__cache_all_fields = []
for i in self.get_classes():
for j in i.get_fields():
self.__cache_all_fields.append(j)
return self.__cache_all_fields |
def main():
'''
Bootstrapper CLI
'''
parser = argparse.ArgumentParser(prog='kclboot',
description='kclboot - Kinesis Client Library Bootstrapper')
subparsers = parser.add_subparsers(title='Subcommands', help='Additional help', dest='subparser')
# Common arguments
jar_path_parser = argparse.ArgumentParser(add_help=False)
jar_path_parser.add_argument('--jar-folder', dest='jar_folder', default='./jars',
help='Folder used to store jar files')
prop_path_parser = argparse.ArgumentParser(add_help=False)
prop_path_parser.add_argument('--properties-file', required=True, dest='properties_file',
help='*.properties file with KCL settings')
# Sub-commands
download_parser = subparsers.add_parser('download', parents=[jar_path_parser],
description='Download jars necessary to run KCL\'s MultiLangDaemon')
download_parser.set_defaults(func=_download)
command_parser = subparsers.add_parser('command', parents=[jar_path_parser, prop_path_parser],
description='Output formatted Java invocation with classpath')
command_parser.set_defaults(func=_command)
classpath_parser = subparsers.add_parser('classpath', parents=[jar_path_parser, prop_path_parser],
description='Output classpath, including jars and the folder containing the *.properties file')
classpath_parser.set_defaults(func=_classpath)
properties_parser = subparsers.add_parser('properties-from-env', parents=[prop_path_parser],
description='Generate a *.properties file from environmental variables')
properties_parser.set_defaults(func=_properties_from_env)
args = parser.parse_args()
if args.subparser:
args.func(args)
elif len(vars(args).keys()) == 1:
parser.print_usage() | Bootstrapper CLI | Below is the the instruction that describes the task:
### Input:
Bootstrapper CLI
### Response:
def main():
'''
Bootstrapper CLI
'''
parser = argparse.ArgumentParser(prog='kclboot',
description='kclboot - Kinesis Client Library Bootstrapper')
subparsers = parser.add_subparsers(title='Subcommands', help='Additional help', dest='subparser')
# Common arguments
jar_path_parser = argparse.ArgumentParser(add_help=False)
jar_path_parser.add_argument('--jar-folder', dest='jar_folder', default='./jars',
help='Folder used to store jar files')
prop_path_parser = argparse.ArgumentParser(add_help=False)
prop_path_parser.add_argument('--properties-file', required=True, dest='properties_file',
help='*.properties file with KCL settings')
# Sub-commands
download_parser = subparsers.add_parser('download', parents=[jar_path_parser],
description='Download jars necessary to run KCL\'s MultiLangDaemon')
download_parser.set_defaults(func=_download)
command_parser = subparsers.add_parser('command', parents=[jar_path_parser, prop_path_parser],
description='Output formatted Java invocation with classpath')
command_parser.set_defaults(func=_command)
classpath_parser = subparsers.add_parser('classpath', parents=[jar_path_parser, prop_path_parser],
description='Output classpath, including jars and the folder containing the *.properties file')
classpath_parser.set_defaults(func=_classpath)
properties_parser = subparsers.add_parser('properties-from-env', parents=[prop_path_parser],
description='Generate a *.properties file from environmental variables')
properties_parser.set_defaults(func=_properties_from_env)
args = parser.parse_args()
if args.subparser:
args.func(args)
elif len(vars(args).keys()) == 1:
parser.print_usage() |
def run_sls_remove(sls_cmd, env_vars):
    """Run ``sls remove`` and exit on real failures.

    Executes the Serverless Framework removal command, echoing its stdout.
    Any non-zero exit status aborts the process via ``sys.exit`` -- except
    the benign case where ``sls`` returns 1 because the stack was already
    gone ("Stack '...' does not exist"), which is treated as success.

    Args:
        sls_cmd: Command argument list passed to ``subprocess.Popen``.
        env_vars: Environment mapping for the child process (or ``None``
            to inherit the current environment).
    """
    sls_process = subprocess.Popen(sls_cmd,
                                   stdout=subprocess.PIPE,
                                   env=env_vars)
    stdoutdata, _stderrdata = sls_process.communicate()
    sls_return = sls_process.wait()
    # communicate() yields bytes on Python 3; decode so the regex below
    # (and the echoed output) behave the same on both major versions.
    if isinstance(stdoutdata, bytes):
        stdoutdata = stdoutdata.decode('utf-8', 'replace')
    print(stdoutdata)
    # Fix: the original condition `sls_return != 0 and (sls_return == 1
    # and not match)` only ever exited when the return code was exactly 1,
    # silently ignoring every other failure code (2, 127, ...). Exit on
    # any non-zero status unless it is the harmless "already removed" case.
    stack_already_gone = (
        sls_return == 1
        and re.search(r"Stack '.*' does not exist", stdoutdata)
    )
    if sls_return != 0 and not stack_already_gone:
        sys.exit(sls_return)
### Input:
Run sls remove command.
### Response:
def run_sls_remove(sls_cmd, env_vars):
    """Run ``sls remove`` and exit on real failures.

    Executes the Serverless Framework removal command, echoing its stdout.
    Any non-zero exit status aborts the process via ``sys.exit`` -- except
    the benign case where ``sls`` returns 1 because the stack was already
    gone ("Stack '...' does not exist"), which is treated as success.

    Args:
        sls_cmd: Command argument list passed to ``subprocess.Popen``.
        env_vars: Environment mapping for the child process (or ``None``
            to inherit the current environment).
    """
    sls_process = subprocess.Popen(sls_cmd,
                                   stdout=subprocess.PIPE,
                                   env=env_vars)
    stdoutdata, _stderrdata = sls_process.communicate()
    sls_return = sls_process.wait()
    # communicate() yields bytes on Python 3; decode so the regex below
    # (and the echoed output) behave the same on both major versions.
    if isinstance(stdoutdata, bytes):
        stdoutdata = stdoutdata.decode('utf-8', 'replace')
    print(stdoutdata)
    # Fix: the original condition `sls_return != 0 and (sls_return == 1
    # and not match)` only ever exited when the return code was exactly 1,
    # silently ignoring every other failure code (2, 127, ...). Exit on
    # any non-zero status unless it is the harmless "already removed" case.
    stack_already_gone = (
        sls_return == 1
        and re.search(r"Stack '.*' does not exist", stdoutdata)
    )
    if sls_return != 0 and not stack_already_gone:
        sys.exit(sls_return)
def debug(self):
    """Retrieve the debug information from the identity manager.

    Returns the decoded response of the ``debug/status`` endpoint, or an
    ``{"error": ...}`` mapping when the request fails with a ServerError.
    """
    status_url = '{}debug/status'.format(self.url)
    try:
        response = make_request(status_url, timeout=self.timeout)
    except ServerError as err:
        return {"error": str(err)}
    return response
### Input:
Retrieve the debug information from the identity manager.
### Response:
def debug(self):
    """Retrieve the debug information from the identity manager.

    Returns the decoded response of the ``debug/status`` endpoint, or an
    ``{"error": ...}`` mapping when the request fails with a ServerError.
    """
    status_url = '{}debug/status'.format(self.url)
    try:
        response = make_request(status_url, timeout=self.timeout)
    except ServerError as err:
        return {"error": str(err)}
    return response
def GetName(obj):
    """Return the name of a class or function as a unicode string.

    Python 2 exposes ``__name__`` as `bytes` (class names are ASCII-only
    there) whereas Python 3 uses `unicode`. This wrapper hides that
    difference and always yields a unicode string. Once support for
    Python 2 is dropped, callers can switch back to plain ``__name__``
    access.

    Args:
        obj: A type or function object to get the name for.

    Returns:
        Name of the specified object as a unicode string.
    """
    precondition.AssertType(obj, (type, types.FunctionType))
    name = obj.__name__
    return name.decode("ascii") if PY2 else name
In Python 2 class names are returned as `bytes` (since class names can contain
only ASCII characters) whereas in Python 3 they are `unicode` (since class
names can contain arbitrary unicode characters).
This function makes this behaviour consistent and always returns class name as
an unicode string.
Once support for Python 2 is dropped all invocations of this call can be
replaced with ordinary `__name__` access.
Args:
obj: A type or function object to get the name for.
Returns:
Name of the specified class as unicode string. | Below is the the instruction that describes the task:
### Input:
A compatibility wrapper for getting object's name.
In Python 2 class names are returned as `bytes` (since class names can contain
only ASCII characters) whereas in Python 3 they are `unicode` (since class
names can contain arbitrary unicode characters).
This function makes this behaviour consistent and always returns class name as
an unicode string.
Once support for Python 2 is dropped all invocations of this call can be
replaced with ordinary `__name__` access.
Args:
obj: A type or function object to get the name for.
Returns:
Name of the specified class as unicode string.
### Response:
def GetName(obj):
    """Return the name of a class or function as a unicode string.

    Python 2 exposes ``__name__`` as `bytes` (class names are ASCII-only
    there) whereas Python 3 uses `unicode`. This wrapper hides that
    difference and always yields a unicode string. Once support for
    Python 2 is dropped, callers can switch back to plain ``__name__``
    access.

    Args:
        obj: A type or function object to get the name for.

    Returns:
        Name of the specified object as a unicode string.
    """
    precondition.AssertType(obj, (type, types.FunctionType))
    name = obj.__name__
    return name.decode("ascii") if PY2 else name
def save(self, fname=''):
    """
    Save the list of items to AIKIF core and optionally to local file fname
    """
    # Optional plain-text export: one comma-delimited line per program.
    if fname != '':
        with open(fname, 'w') as f:
            for i in self.lstPrograms:
                f.write(self.get_file_info_line(i, ','))
    # save to standard AIKIF structure
    filemap = mod_filemap.FileMap([], [])
    #location_fileList = filemap.get_full_filename(filemap.find_type('LOCATION'), filemap.find_ontology('FILE-PROGRAM')[0])
    object_fileList = filemap.get_full_filename(filemap.find_type('OBJECT'), filemap.find_ontology('FILE-PROGRAM')[0])
    print('object_fileList = ' + object_fileList + '\n')
    # Rewrite the object list from scratch: drop any stale copy first.
    if os.path.exists(object_fileList):
        os.remove(object_fileList)
    self.lstPrograms.sort()
    try:
        # Append mode is effectively a fresh write, since the file was
        # removed just above. Each entry's first element is the file name.
        with open(object_fileList, 'a') as f:
            f.write('\n'.join([i[0] for i in self.lstPrograms]))
    except Exception as ex:
        # Best-effort persistence: report the failure but don't raise.
        print('ERROR = cant write to object_filelist ' , object_fileList, str(ex))
### Input:
Save the list of items to AIKIF core and optionally to local file fname
### Response:
def save(self, fname=''):
    """
    Save the list of items to AIKIF core and optionally to local file fname
    """
    # Optional plain-text export: one comma-delimited line per program.
    if fname != '':
        with open(fname, 'w') as f:
            for i in self.lstPrograms:
                f.write(self.get_file_info_line(i, ','))
    # save to standard AIKIF structure
    filemap = mod_filemap.FileMap([], [])
    #location_fileList = filemap.get_full_filename(filemap.find_type('LOCATION'), filemap.find_ontology('FILE-PROGRAM')[0])
    object_fileList = filemap.get_full_filename(filemap.find_type('OBJECT'), filemap.find_ontology('FILE-PROGRAM')[0])
    print('object_fileList = ' + object_fileList + '\n')
    # Rewrite the object list from scratch: drop any stale copy first.
    if os.path.exists(object_fileList):
        os.remove(object_fileList)
    self.lstPrograms.sort()
    try:
        # Append mode is effectively a fresh write, since the file was
        # removed just above. Each entry's first element is the file name.
        with open(object_fileList, 'a') as f:
            f.write('\n'.join([i[0] for i in self.lstPrograms]))
    except Exception as ex:
        # Best-effort persistence: report the failure but don't raise.
        print('ERROR = cant write to object_filelist ' , object_fileList, str(ex))
def serialize_to_file(obj, file_name, append=False):
    """Pickle ``obj`` into ``file_name``.

    Args:
        obj: Any picklable Python object.
        file_name: Destination path, opened through ``tf.gfile``.
        append: When True, open in ``a+`` mode so the record is appended
            instead of overwriting the file.
    """
    logging.info("Serializing to file %s.", file_name)
    mode = "a+" if append else "wb"
    with tf.gfile.Open(file_name, mode) as output_file:
        pickle.dump(obj, output_file)
    logging.info("Done serializing to file %s.", file_name)
### Input:
Pickle obj to file_name.
### Response:
def serialize_to_file(obj, file_name, append=False):
    """Pickle ``obj`` into ``file_name``.

    Args:
        obj: Any picklable Python object.
        file_name: Destination path, opened through ``tf.gfile``.
        append: When True, open in ``a+`` mode so the record is appended
            instead of overwriting the file.
    """
    logging.info("Serializing to file %s.", file_name)
    mode = "a+" if append else "wb"
    with tf.gfile.Open(file_name, mode) as output_file:
        pickle.dump(obj, output_file)
    logging.info("Done serializing to file %s.", file_name)
def check(self, request, secret):
    """Verifies whether or not the request bears an authorization appropriate and valid for this version of the signature.
    This verifies every element of the signature, including the timestamp's value.
    Does not alter the request.
    Keyword arguments:
    request -- A request object which can be consumed by this API.
    secret -- The base64-encoded secret key for the HMAC authorization.
    """
    # Unsigned request: not valid, but not an error either.
    if request.get_header("Authorization") == "":
        return False
    ah = self.parse_auth_headers(request.get_header("Authorization"))
    if "signature" not in ah:
        return False
    # The timestamp header is mandatory and must parse to a non-zero epoch.
    if request.get_header('x-authorization-timestamp') == '':
        raise KeyError("X-Authorization-Timestamp is required.")
    timestamp = int(float(request.get_header('x-authorization-timestamp')))
    if timestamp == 0:
        raise ValueError("X-Authorization-Timestamp must be a valid, non-zero timestamp.")
    # preset_time lets callers pin "now" (e.g. for tests); otherwise use
    # the wall clock.
    if self.preset_time is None:
        curr_time = time.time()
    else:
        curr_time = self.preset_time
    # Reject timestamps drifting more than 900 seconds (15 minutes)
    # either side of the current time, limiting replay windows.
    if timestamp > curr_time + 900:
        raise ValueError("X-Authorization-Timestamp is too far in the future.")
    if timestamp < curr_time - 900:
        raise ValueError("X-Authorization-Timestamp is too far in the past.")
    # Requests carrying a body must also carry a base64 SHA-256 digest of
    # that body, which is recomputed and compared here.
    if request.body is not None and request.body != b'':
        content_hash = request.get_header("x-authorization-content-sha256")
        if content_hash == '':
            raise KeyError("X-Authorization-Content-SHA256 is required for requests with a request body.")
        sha256 = hashlib.sha256()
        sha256.update(request.body)
        if content_hash != base64.b64encode(sha256.digest()).decode('utf-8'):
            raise ValueError("X-Authorization-Content-SHA256 must match the SHA-256 hash of the request body.")
    # Finally, recompute the signature over the request and compare it to
    # the one presented in the Authorization header.
    return ah["signature"] == self.sign(request, ah, secret)
This verifies every element of the signature, including the timestamp's value.
Does not alter the request.
Keyword arguments:
request -- A request object which can be consumed by this API.
secret -- The base64-encoded secret key for the HMAC authorization. | Below is the the instruction that describes the task:
### Input:
Verifies whether or not the request bears an authorization appropriate and valid for this version of the signature.
This verifies every element of the signature, including the timestamp's value.
Does not alter the request.
Keyword arguments:
request -- A request object which can be consumed by this API.
secret -- The base64-encoded secret key for the HMAC authorization.
### Response:
def check(self, request, secret):
    """Verifies whether or not the request bears an authorization appropriate and valid for this version of the signature.
    This verifies every element of the signature, including the timestamp's value.
    Does not alter the request.
    Keyword arguments:
    request -- A request object which can be consumed by this API.
    secret -- The base64-encoded secret key for the HMAC authorization.
    """
    # Unsigned request: not valid, but not an error either.
    if request.get_header("Authorization") == "":
        return False
    ah = self.parse_auth_headers(request.get_header("Authorization"))
    if "signature" not in ah:
        return False
    # The timestamp header is mandatory and must parse to a non-zero epoch.
    if request.get_header('x-authorization-timestamp') == '':
        raise KeyError("X-Authorization-Timestamp is required.")
    timestamp = int(float(request.get_header('x-authorization-timestamp')))
    if timestamp == 0:
        raise ValueError("X-Authorization-Timestamp must be a valid, non-zero timestamp.")
    # preset_time lets callers pin "now" (e.g. for tests); otherwise use
    # the wall clock.
    if self.preset_time is None:
        curr_time = time.time()
    else:
        curr_time = self.preset_time
    # Reject timestamps drifting more than 900 seconds (15 minutes)
    # either side of the current time, limiting replay windows.
    if timestamp > curr_time + 900:
        raise ValueError("X-Authorization-Timestamp is too far in the future.")
    if timestamp < curr_time - 900:
        raise ValueError("X-Authorization-Timestamp is too far in the past.")
    # Requests carrying a body must also carry a base64 SHA-256 digest of
    # that body, which is recomputed and compared here.
    if request.body is not None and request.body != b'':
        content_hash = request.get_header("x-authorization-content-sha256")
        if content_hash == '':
            raise KeyError("X-Authorization-Content-SHA256 is required for requests with a request body.")
        sha256 = hashlib.sha256()
        sha256.update(request.body)
        if content_hash != base64.b64encode(sha256.digest()).decode('utf-8'):
            raise ValueError("X-Authorization-Content-SHA256 must match the SHA-256 hash of the request body.")
    # Finally, recompute the signature over the request and compare it to
    # the one presented in the Authorization header.
    return ah["signature"] == self.sign(request, ah, secret)
def delete_library(self, library):
    """
    Delete an Arctic Library, and all associated collections in the MongoDB.
    Parameters
    ----------
    library : `str`
        The name of the library. e.g. 'library' or 'user.library'
    """
    lib = ArcticLibraryBinding(self, library)
    colname = lib.get_top_level_collection().name
    # No collection matches `colname` or any of its dotted sub-collections:
    # log and fall through -- the drops below become harmless no-ops.
    if not [c for c in lib._db.list_collection_names(False) if re.match(r"^{}([\.].*)?$".format(colname), c)]:
        logger.info('Nothing to delete. Arctic library %s does not exist.' % colname)
    logger.info('Dropping collection: %s' % colname)
    lib._db.drop_collection(colname)
    # Also drop every dotted sub-collection (e.g. '<colname>.versions').
    for coll in lib._db.list_collection_names():
        if coll.startswith(colname + '.'):
            logger.info('Dropping collection: %s' % coll)
            lib._db.drop_collection(coll)
    # Invalidate both cache keys: the name as given and the fully
    # qualified name resolved by the binding.
    if library in self._library_cache:
        del self._library_cache[library]
        del self._library_cache[lib.get_name()]
    self._cache.delete_item_from_key('list_libraries', self._sanitize_lib_name(library))
Parameters
----------
library : `str`
The name of the library. e.g. 'library' or 'user.library' | Below is the the instruction that describes the task:
### Input:
Delete an Arctic Library, and all associated collections in the MongoDB.
Parameters
----------
library : `str`
The name of the library. e.g. 'library' or 'user.library'
### Response:
def delete_library(self, library):
    """
    Delete an Arctic Library, and all associated collections in the MongoDB.
    Parameters
    ----------
    library : `str`
        The name of the library. e.g. 'library' or 'user.library'
    """
    lib = ArcticLibraryBinding(self, library)
    colname = lib.get_top_level_collection().name
    # No collection matches `colname` or any of its dotted sub-collections:
    # log and fall through -- the drops below become harmless no-ops.
    if not [c for c in lib._db.list_collection_names(False) if re.match(r"^{}([\.].*)?$".format(colname), c)]:
        logger.info('Nothing to delete. Arctic library %s does not exist.' % colname)
    logger.info('Dropping collection: %s' % colname)
    lib._db.drop_collection(colname)
    # Also drop every dotted sub-collection (e.g. '<colname>.versions').
    for coll in lib._db.list_collection_names():
        if coll.startswith(colname + '.'):
            logger.info('Dropping collection: %s' % coll)
            lib._db.drop_collection(coll)
    # Invalidate both cache keys: the name as given and the fully
    # qualified name resolved by the binding.
    if library in self._library_cache:
        del self._library_cache[library]
        del self._library_cache[lib.get_name()]
    self._cache.delete_item_from_key('list_libraries', self._sanitize_lib_name(library))
def wheel(self, load):
    '''
    Send a master control function back to the wheel system

    load -- the wheel payload; must contain ``fun`` (and ``kwarg`` for
    non-user eauth) plus whatever credentials the eauth backend expects.

    Returns a ``{'tag': ..., 'data': ...}`` dict describing the job, or
    an ``{'error': ...}`` dict when authentication/authorization fails.
    '''
    # All wheel ops pass through eauth
    auth_type, err_name, key = self._prep_auth_info(load)
    # Authenticate
    auth_check = self.loadauth.check_authentication(
        load,
        auth_type,
        key=key,
        show_username=True
    )
    error = auth_check.get('error')
    if error:
        # Authentication error occurred: do not continue.
        return {'error': error}
    # Authorize
    username = auth_check.get('username')
    if auth_type != 'user':
        wheel_check = self.ckminions.wheel_check(
            auth_check.get('auth_list', []),
            load['fun'],
            load['kwarg']
        )
        if not wheel_check:
            return {'error': {'name': err_name,
                              'message': 'Authentication failure of type "{0}" occurred for '
                                         'user {1}.'.format(auth_type, username)}}
        elif isinstance(wheel_check, dict) and 'error' in wheel_check:
            # A dictionary with an error name/message was handled by ckminions.wheel_check
            return wheel_check
    # Authenticated. Do the job.
    jid = salt.utils.jid.gen_jid(self.opts)
    fun = load.pop('fun')
    tag = salt.utils.event.tagify(jid, prefix='wheel')
    data = {'fun': "wheel.{0}".format(fun),
            'jid': jid,
            'tag': tag,
            'user': username}
    try:
        # Announce the new job, execute it, then publish the return event.
        self.event.fire_event(data, salt.utils.event.tagify([jid, 'new'], 'wheel'))
        ret = self.wheel_.call_func(fun, **load)
        data['return'] = ret
        data['success'] = True
        self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
    except Exception as exc:
        # Failures are reported through the same event channel instead of
        # propagating to the caller.
        log.exception('Exception occurred while introspecting %s', fun)
        data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
            fun,
            exc.__class__.__name__,
            exc,
        )
        data['success'] = False
        self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
### Input:
Send a master control function back to the wheel system
### Response:
def wheel(self, load):
    '''
    Send a master control function back to the wheel system

    load -- the wheel payload; must contain ``fun`` (and ``kwarg`` for
    non-user eauth) plus whatever credentials the eauth backend expects.

    Returns a ``{'tag': ..., 'data': ...}`` dict describing the job, or
    an ``{'error': ...}`` dict when authentication/authorization fails.
    '''
    # All wheel ops pass through eauth
    auth_type, err_name, key = self._prep_auth_info(load)
    # Authenticate
    auth_check = self.loadauth.check_authentication(
        load,
        auth_type,
        key=key,
        show_username=True
    )
    error = auth_check.get('error')
    if error:
        # Authentication error occurred: do not continue.
        return {'error': error}
    # Authorize
    username = auth_check.get('username')
    if auth_type != 'user':
        wheel_check = self.ckminions.wheel_check(
            auth_check.get('auth_list', []),
            load['fun'],
            load['kwarg']
        )
        if not wheel_check:
            return {'error': {'name': err_name,
                              'message': 'Authentication failure of type "{0}" occurred for '
                                         'user {1}.'.format(auth_type, username)}}
        elif isinstance(wheel_check, dict) and 'error' in wheel_check:
            # A dictionary with an error name/message was handled by ckminions.wheel_check
            return wheel_check
    # Authenticated. Do the job.
    jid = salt.utils.jid.gen_jid(self.opts)
    fun = load.pop('fun')
    tag = salt.utils.event.tagify(jid, prefix='wheel')
    data = {'fun': "wheel.{0}".format(fun),
            'jid': jid,
            'tag': tag,
            'user': username}
    try:
        # Announce the new job, execute it, then publish the return event.
        self.event.fire_event(data, salt.utils.event.tagify([jid, 'new'], 'wheel'))
        ret = self.wheel_.call_func(fun, **load)
        data['return'] = ret
        data['success'] = True
        self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
    except Exception as exc:
        # Failures are reported through the same event channel instead of
        # propagating to the caller.
        log.exception('Exception occurred while introspecting %s', fun)
        data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
            fun,
            exc.__class__.__name__,
            exc,
        )
        data['success'] = False
        self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
def update(self, read, write, manage):
    """
    Update the SyncListPermissionInstance

    :param bool read: Read access.
    :param bool write: Write access.
    :param bool manage: Manage access.

    :returns: Updated SyncListPermissionInstance
    :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
    """
    params = values.of({'Read': read, 'Write': write, 'Manage': manage, })
    payload = self._version.update('POST', self._uri, data=params)
    solution = self._solution
    return SyncListPermissionInstance(
        self._version,
        payload,
        service_sid=solution['service_sid'],
        list_sid=solution['list_sid'],
        identity=solution['identity'],
    )
:param bool read: Read access.
:param bool write: Write access.
:param bool manage: Manage access.
:returns: Updated SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance | Below is the the instruction that describes the task:
### Input:
Update the SyncListPermissionInstance
:param bool read: Read access.
:param bool write: Write access.
:param bool manage: Manage access.
:returns: Updated SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
### Response:
def update(self, read, write, manage):
    """
    Update the SyncListPermissionInstance

    :param bool read: Read access.
    :param bool write: Write access.
    :param bool manage: Manage access.

    :returns: Updated SyncListPermissionInstance
    :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
    """
    params = values.of({'Read': read, 'Write': write, 'Manage': manage, })
    payload = self._version.update('POST', self._uri, data=params)
    solution = self._solution
    return SyncListPermissionInstance(
        self._version,
        payload,
        service_sid=solution['service_sid'],
        list_sid=solution['list_sid'],
        identity=solution['identity'],
    )
def _generate_struct_class_custom_annotations(self, ns, data_type):
    """
    The _process_custom_annotations function allows client code to access
    custom annotations defined in the spec.
    """
    self.emit('def _process_custom_annotations(self, annotation_type, field_path, processor):')
    # If nothing is emitted inside this block, emit_pass_if_nothing_emitted
    # inserts a bare `pass` so the generated method body stays valid Python.
    with self.indent(), emit_pass_if_nothing_emitted(self):
        # Always delegate to the superclass implementation first.
        self.emit(
            (
                'super({}, self)._process_custom_annotations(annotation_type, field_path, '
                'processor)'
            ).format(class_name_for_data_type(data_type))
        )
        self.emit()
        # For every annotated field, emit an `if` arm that re-assigns the
        # field through the processor when the annotation type matches.
        for field in data_type.fields:
            field_name = fmt_var(field.name, check_reserved=True)
            for annotation_type, processor in self._generate_custom_annotation_processors(
                    ns, field.data_type, field.custom_annotations):
                annotation_class = class_name_for_annotation_type(annotation_type, ns)
                self.emit('if annotation_type is {}:'.format(annotation_class))
                with self.indent():
                    # The doubled braces survive this .format() call so the
                    # generated code itself contains a '{}.field'.format(...)
                    # expression evaluated at runtime.
                    self.emit('self.{} = {}'.format(
                        field_name,
                        generate_func_call(
                            processor,
                            args=[
                                "'{{}}.{}'.format(field_path)".format(field_name),
                                'self.{}'.format(field_name),
                            ])
                    ))
                self.emit()
custom annotations defined in the spec. | Below is the the instruction that describes the task:
### Input:
The _process_custom_annotations function allows client code to access
custom annotations defined in the spec.
### Response:
def _generate_struct_class_custom_annotations(self, ns, data_type):
    """
    The _process_custom_annotations function allows client code to access
    custom annotations defined in the spec.
    """
    self.emit('def _process_custom_annotations(self, annotation_type, field_path, processor):')
    # If nothing is emitted inside this block, emit_pass_if_nothing_emitted
    # inserts a bare `pass` so the generated method body stays valid Python.
    with self.indent(), emit_pass_if_nothing_emitted(self):
        # Always delegate to the superclass implementation first.
        self.emit(
            (
                'super({}, self)._process_custom_annotations(annotation_type, field_path, '
                'processor)'
            ).format(class_name_for_data_type(data_type))
        )
        self.emit()
        # For every annotated field, emit an `if` arm that re-assigns the
        # field through the processor when the annotation type matches.
        for field in data_type.fields:
            field_name = fmt_var(field.name, check_reserved=True)
            for annotation_type, processor in self._generate_custom_annotation_processors(
                    ns, field.data_type, field.custom_annotations):
                annotation_class = class_name_for_annotation_type(annotation_type, ns)
                self.emit('if annotation_type is {}:'.format(annotation_class))
                with self.indent():
                    # The doubled braces survive this .format() call so the
                    # generated code itself contains a '{}.field'.format(...)
                    # expression evaluated at runtime.
                    self.emit('self.{} = {}'.format(
                        field_name,
                        generate_func_call(
                            processor,
                            args=[
                                "'{{}}.{}'.format(field_path)".format(field_name),
                                'self.{}'.format(field_name),
                            ])
                    ))
                self.emit()
def drawRect(self, x1, y1, x2, y2, angle=0):
    """
    Draws a rectangle on the current :py:class:`Layer` with the current :py:class:`Brush`.
    Coordinates are relative to the original layer size WITHOUT downsampling applied.

    :param x1: The X of the top-left corner of the rectangle.
    :param y1: The Y of the top-left corner of the rectangle.
    :param x2: The X of the bottom-right corner of the rectangle.
    :param y2: The Y of the bottom-right corner of the rectangle.
    :param angle: An angle (in degrees) of rotation around the center of the rectangle.
    :rtype: Nothing.
    """
    center_x = (x1 + x2) * 0.5
    center_y = (y1 + y2) * 0.5
    corners = [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]
    self.drawClosedPath(rotateMatrix(corners, center_x, center_y, angle))
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param x1: The X of the top-left corner of the rectangle.
:param y1: The Y of the top-left corner of the rectangle.
:param x2: The X of the bottom-right corner of the rectangle.
:param y2: The Y of the bottom-right corner of the rectangle.
:param angle: An angle (in degrees) of rotation around the center of the rectangle.
:rtype: Nothing. | Below is the the instruction that describes the task:
### Input:
Draws a rectangle on the current :py:class:`Layer` with the current :py:class:`Brush`.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param x1: The X of the top-left corner of the rectangle.
:param y1: The Y of the top-left corner of the rectangle.
:param x2: The X of the bottom-right corner of the rectangle.
:param y2: The Y of the bottom-right corner of the rectangle.
:param angle: An angle (in degrees) of rotation around the center of the rectangle.
:rtype: Nothing.
### Response:
def drawRect(self, x1, y1, x2, y2, angle=0):
    """
    Draws a rectangle on the current :py:class:`Layer` with the current :py:class:`Brush`.
    Coordinates are relative to the original layer size WITHOUT downsampling applied.

    :param x1: The X of the top-left corner of the rectangle.
    :param y1: The Y of the top-left corner of the rectangle.
    :param x2: The X of the bottom-right corner of the rectangle.
    :param y2: The Y of the bottom-right corner of the rectangle.
    :param angle: An angle (in degrees) of rotation around the center of the rectangle.
    :rtype: Nothing.
    """
    center_x = (x1 + x2) * 0.5
    center_y = (y1 + y2) * 0.5
    corners = [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]
    self.drawClosedPath(rotateMatrix(corners, center_x, center_y, angle))
def normalize(self):
    """Normalize the MOC to a given order.

    This command takes a MOC order (0-29) and normalizes the MOC so that
    its maximum order is the given order.

    ::

        pymoctool a.fits --normalize 10 --output a_10.fits
    """
    if self.moc is None:
        raise CommandError('No MOC information present for normalization')
    # The order argument is consumed from the pending parameter list.
    self.moc.normalize(int(self.params.pop()))
This command takes a MOC order (0-29) and normalizes the MOC so that
its maximum order is the given order.
::
pymoctool a.fits --normalize 10 --output a_10.fits | Below is the the instruction that describes the task:
### Input:
Normalize the MOC to a given order.
This command takes a MOC order (0-29) and normalizes the MOC so that
its maximum order is the given order.
::
pymoctool a.fits --normalize 10 --output a_10.fits
### Response:
def normalize(self):
    """Normalize the MOC to a given order.

    This command takes a MOC order (0-29) and normalizes the MOC so that
    its maximum order is the given order.

    ::

        pymoctool a.fits --normalize 10 --output a_10.fits
    """
    if self.moc is None:
        raise CommandError('No MOC information present for normalization')
    # The order argument is consumed from the pending parameter list.
    self.moc.normalize(int(self.params.pop()))
def _trim_buffer_garbage(rawmessage, debug=True):
    """Remove leading bytes from a byte stream.

    A proper message byte stream begins with 0x02; everything before the
    first 0x02 is discarded one byte at a time, optionally logging the
    remaining buffer at each step.
    """
    while rawmessage:
        if rawmessage[0] == MESSAGE_START_CODE_0X02:
            break
        if debug:
            _LOGGER.debug('Buffer content: %s', binascii.hexlify(rawmessage))
            _LOGGER.debug('Trimming leading buffer garbage')
        rawmessage = rawmessage[1:]
    return rawmessage
A proper message byte stream begins with 0x02. | Below is the the instruction that describes the task:
### Input:
Remove leading bytes from a byte stream.
A proper message byte stream begins with 0x02.
### Response:
def _trim_buffer_garbage(rawmessage, debug=True):
    """Remove leading bytes from a byte stream.

    A proper message byte stream begins with 0x02; everything before the
    first 0x02 is discarded one byte at a time, optionally logging the
    remaining buffer at each step.
    """
    while rawmessage:
        if rawmessage[0] == MESSAGE_START_CODE_0X02:
            break
        if debug:
            _LOGGER.debug('Buffer content: %s', binascii.hexlify(rawmessage))
            _LOGGER.debug('Trimming leading buffer garbage')
        rawmessage = rawmessage[1:]
    return rawmessage
def find_wheels(projects, search_dirs):
    """Find wheels from which we can import PROJECTS.

    Scan through SEARCH_DIRS for a wheel for each PROJECT in turn; the
    first match per project wins. Version checking is deliberately
    skipped -- these wheels only bootstrap installing the right version.
    Returns the list of absolute wheel paths found.
    """
    wheels = []
    for project in projects:
        # This relies on only having "universal" wheels available.
        # The pattern could be tightened to require -py2.py3-none-any.whl.
        pattern = project + '-*.whl'
        matches = []
        for dirname in search_dirs:
            matches = glob.glob(os.path.join(dirname, pattern))
            if matches:
                wheels.append(os.path.abspath(matches[0]))
                break
        if not matches:
            # We're out of luck, so quit with a suitable error
            logger.fatal('Cannot find a wheel for %s' % (project,))
    return wheels
Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return
a list of the first wheel found for each PROJECT | Below is the the instruction that describes the task:
### Input:
Find wheels from which we can import PROJECTS.
Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return
a list of the first wheel found for each PROJECT
### Response:
def find_wheels(projects, search_dirs):
    """Find wheels from which we can import PROJECTS.

    Scan through SEARCH_DIRS for a wheel for each PROJECT in turn; the
    first match per project wins. Version checking is deliberately
    skipped -- these wheels only bootstrap installing the right version.
    Returns the list of absolute wheel paths found.
    """
    wheels = []
    for project in projects:
        # This relies on only having "universal" wheels available.
        # The pattern could be tightened to require -py2.py3-none-any.whl.
        pattern = project + '-*.whl'
        matches = []
        for dirname in search_dirs:
            matches = glob.glob(os.path.join(dirname, pattern))
            if matches:
                wheels.append(os.path.abspath(matches[0]))
                break
        if not matches:
            # We're out of luck, so quit with a suitable error
            logger.fatal('Cannot find a wheel for %s' % (project,))
    return wheels
def remove_children(self, reset_parent=True):
    """
    Remove all the children of this node.

    :param bool reset_parent: if ``True``, set to ``None`` the parent
        attribute of the children before they are dropped
    """
    detached = self.children if reset_parent else []
    for child in detached:
        child.parent = None
    self.__children = []
:param bool reset_parent: if ``True``, set to ``None`` the parent attribute
of the children | Below is the the instruction that describes the task:
### Input:
Remove all the children of this node.
:param bool reset_parent: if ``True``, set to ``None`` the parent attribute
of the children
### Response:
def remove_children(self, reset_parent=True):
    """
    Remove all the children of this node.

    :param bool reset_parent: if ``True``, set to ``None`` the parent
        attribute of the children before they are dropped
    """
    detached = self.children if reset_parent else []
    for child in detached:
        child.parent = None
    self.__children = []
def generate_unique_key(master_key_path, url):
    """Derive a deterministic 32-byte key for an S3 URL.

    Input1: Path to the BD2K Master Key (for S3 Encryption)
    Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam)
    Returns: 32-byte unique key generated for that URL
    """
    # Read in binary mode: hashlib requires bytes on Python 3, and the
    # original text-mode read + str concatenation only worked on Python 2.
    with open(master_key_path, 'rb') as f:
        master_key = f.read()
    assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
                                  'Key: {}, Length: {}'.format(master_key, len(master_key))
    if isinstance(url, str):
        url = url.encode('utf-8')
    new_key = hashlib.sha256(master_key + url).digest()
    assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
    return new_key
Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam)
Returns: 32-byte unique key generated for that URL | Below is the the instruction that describes the task:
### Input:
Input1: Path to the BD2K Master Key (for S3 Encryption)
Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam)
Returns: 32-byte unique key generated for that URL
### Response:
def generate_unique_key(master_key_path, url):
    """Derive a deterministic 32-byte key for an S3 URL.

    Input1: Path to the BD2K Master Key (for S3 Encryption)
    Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam)
    Returns: 32-byte unique key generated for that URL
    """
    # Read in binary mode: hashlib requires bytes on Python 3, and the
    # original text-mode read + str concatenation only worked on Python 2.
    with open(master_key_path, 'rb') as f:
        master_key = f.read()
    assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
                                  'Key: {}, Length: {}'.format(master_key, len(master_key))
    if isinstance(url, str):
        url = url.encode('utf-8')
    new_key = hashlib.sha256(master_key + url).digest()
    assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
    return new_key
def endpoint_name(self, endpoint_name):
    """
    Sets the endpoint_name of this PreSharedKey.
    The unique endpoint identifier that this pre-shared key applies to. 16-64 [printable](https://en.wikipedia.org/wiki/ASCII#Printable_characters) (non-control) ASCII characters.

    :param endpoint_name: The endpoint_name of this PreSharedKey.
    :type: str
    """
    # Guard clauses: reject None, then reject anything that is not
    # 16-64 printable ASCII characters.
    if endpoint_name is None:
        raise ValueError("Invalid value for `endpoint_name`, must not be `None`")
    if re.search('^[ -~]{16,64}$', endpoint_name) is None:
        raise ValueError("Invalid value for `endpoint_name`, must be a follow pattern or equal to `/^[ -~]{16,64}$/`")
    self._endpoint_name = endpoint_name
The unique endpoint identifier that this pre-shared key applies to. 16-64 [printable](https://en.wikipedia.org/wiki/ASCII#Printable_characters) (non-control) ASCII characters.
:param endpoint_name: The endpoint_name of this PreSharedKey.
:type: str | Below is the the instruction that describes the task:
### Input:
Sets the endpoint_name of this PreSharedKey.
The unique endpoint identifier that this pre-shared key applies to. 16-64 [printable](https://en.wikipedia.org/wiki/ASCII#Printable_characters) (non-control) ASCII characters.
:param endpoint_name: The endpoint_name of this PreSharedKey.
:type: str
### Response:
def endpoint_name(self, endpoint_name):
    """
    Sets the endpoint_name of this PreSharedKey.
    The unique endpoint identifier that this pre-shared key applies to. 16-64 [printable](https://en.wikipedia.org/wiki/ASCII#Printable_characters) (non-control) ASCII characters.
    :param endpoint_name: The endpoint_name of this PreSharedKey.
    :type: str
    :raises ValueError: if the value is None or does not match the pattern.
    """
    if endpoint_name is None:
        raise ValueError("Invalid value for `endpoint_name`, must not be `None`")
    # The original re-tested `endpoint_name is not None` here; that branch
    # was dead because a None value has already raised above.
    if not re.search('^[ -~]{16,64}$', endpoint_name):
        raise ValueError("Invalid value for `endpoint_name`, must be a follow pattern or equal to `/^[ -~]{16,64}$/`")
    self._endpoint_name = endpoint_name
def _execute(self, operation): # type: (Operation) -> None
"""
Execute a given operation.
"""
method = operation.job_type
getattr(self, "_execute_{}".format(method))(operation) | Execute a given operation. | Below is the the instruction that describes the task:
### Input:
Execute a given operation.
### Response:
def _execute(self, operation): # type: (Operation) -> None
"""
Execute a given operation.
"""
method = operation.job_type
getattr(self, "_execute_{}".format(method))(operation) |
def query_mongo_sort_decend(
        database_name,
        collection_name,
        query=None,
        skip=0,
        limit=getattr(
            settings,
            'MONGO_LIMIT',
            200),
        return_keys=(),
        sortkey=None):
    """Return a response_dict with a list of search results in descending
    order based on a sort key.

    :param database_name: name of the MongoDB database to query.
    :param collection_name: name of the collection within that database.
    :param query: MongoDB query document (defaults to an empty query).
    :param skip: number of matching documents to skip.
    :param limit: maximum number of documents to return.
    :param return_keys: optional iterable of field names to project.
    :param sortkey: field name to sort on, in descending order.
    :returns: dict with ``code``, ``type`` and ``results``; on failure
        ``code`` is 500 and ``message`` carries the exception info.
    """
    # Use None as the default and build the dict per call to avoid the
    # shared-mutable-default-argument pitfall.
    if query is None:
        query = {}
    results = []
    response_dict = {}
    try:
        mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
                                     'mongodb://localhost:27017/')
        mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
        db = mc[str(database_name)]
        collection = db[str(collection_name)]
        if return_keys:
            # Project only the requested fields.
            projection = {key: 1 for key in return_keys}
            mysearchresult = collection.find(
                query, projection).skip(skip).limit(limit).sort(
                sortkey, DESCENDING)
        else:
            mysearchresult = collection.find(query).skip(
                skip).limit(limit).sort(sortkey, DESCENDING)
        # NOTE(review): num_results is only populated on the error path; the
        # success path skips the (potentially expensive) count -- confirm
        # callers do not rely on it.
        response_dict['code'] = 200
        response_dict['type'] = "search-results"
        for d in mysearchresult:
            # Expose the ObjectId as a plain string under 'id'.
            d['id'] = d['_id'].__str__()
            del d['_id']
            results.append(d)
        response_dict['results'] = results
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print("Error reading from Mongo")
        print(str(sys.exc_info()))
        response_dict['num_results'] = 0
        response_dict['code'] = 500
        response_dict['type'] = "Error"
        response_dict['results'] = []
        response_dict['message'] = str(sys.exc_info())
    return response_dict
order based on a sort key | Below is the instruction that describes the task:
### Input:
return a response_dict with a list of search results in descending
order based on a sort key
### Response:
def query_mongo_sort_decend(
        database_name,
        collection_name,
        query=None,
        skip=0,
        limit=getattr(
            settings,
            'MONGO_LIMIT',
            200),
        return_keys=(),
        sortkey=None):
    """Return a response_dict with a list of search results in descending
    order based on a sort key.

    :param database_name: name of the MongoDB database to query.
    :param collection_name: name of the collection within that database.
    :param query: MongoDB query document (defaults to an empty query).
    :param skip: number of matching documents to skip.
    :param limit: maximum number of documents to return.
    :param return_keys: optional iterable of field names to project.
    :param sortkey: field name to sort on, in descending order.
    :returns: dict with ``code``, ``type`` and ``results``; on failure
        ``code`` is 500 and ``message`` carries the exception info.
    """
    # Use None as the default and build the dict per call to avoid the
    # shared-mutable-default-argument pitfall.
    if query is None:
        query = {}
    results = []
    response_dict = {}
    try:
        mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
                                     'mongodb://localhost:27017/')
        mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
        db = mc[str(database_name)]
        collection = db[str(collection_name)]
        if return_keys:
            # Project only the requested fields.
            projection = {key: 1 for key in return_keys}
            mysearchresult = collection.find(
                query, projection).skip(skip).limit(limit).sort(
                sortkey, DESCENDING)
        else:
            mysearchresult = collection.find(query).skip(
                skip).limit(limit).sort(sortkey, DESCENDING)
        # NOTE(review): num_results is only populated on the error path; the
        # success path skips the (potentially expensive) count -- confirm
        # callers do not rely on it.
        response_dict['code'] = 200
        response_dict['type'] = "search-results"
        for d in mysearchresult:
            # Expose the ObjectId as a plain string under 'id'.
            d['id'] = d['_id'].__str__()
            del d['_id']
            results.append(d)
        response_dict['results'] = results
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print("Error reading from Mongo")
        print(str(sys.exc_info()))
        response_dict['num_results'] = 0
        response_dict['code'] = 500
        response_dict['type'] = "Error"
        response_dict['results'] = []
        response_dict['message'] = str(sys.exc_info())
    return response_dict
def get_vcf_header(source):
    """Collect the header lines of a VCF file.

    Args:
        source(iterable): A vcf file

    Returns:
        head (HeaderParser): A headerparser object
    """
    head = HeaderParser()
    # Header lines all start with '#'; the first non-header line ends parsing.
    for raw_line in source:
        stripped = raw_line.rstrip()
        if not stripped.startswith('#'):
            break
        if stripped.startswith('##'):
            logger.debug("Found metadata line {0}".format(stripped))
            head.parse_meta_data(stripped)
        else:
            logger.debug("Found header line {0}".format(stripped))
            head.parse_header_line(stripped)
    return head
Args:
source(iterable): A vcf file
Returns:
head (HeaderParser): A headerparser object | Below is the the instruction that describes the task:
### Input:
Get the header lines of a vcf file
Args:
source(iterable): A vcf file
Returns:
head (HeaderParser): A headerparser object
### Response:
def get_vcf_header(source):
    """Collect the header lines of a VCF file.

    Args:
        source(iterable): A vcf file

    Returns:
        head (HeaderParser): A headerparser object
    """
    head = HeaderParser()
    # Header lines all start with '#'; the first non-header line ends parsing.
    for raw_line in source:
        stripped = raw_line.rstrip()
        if not stripped.startswith('#'):
            break
        if stripped.startswith('##'):
            logger.debug("Found metadata line {0}".format(stripped))
            head.parse_meta_data(stripped)
        else:
            logger.debug("Found header line {0}".format(stripped))
            head.parse_header_line(stripped)
    return head
def make_message(message, binary=False):
    """Frame *message* as a text or binary message.

    Strings are UTF-8 encoded first; bytes pass through unchanged.
    """
    payload = message.encode('utf-8') if isinstance(message, str) else message
    opcode = OPCODE_BINARY if binary else OPCODE_TEXT
    return _make_frame(payload, opcode)
### Input:
Make text message.
### Response:
def make_message(message, binary=False):
    """Frame *message* as a text or binary message.

    Strings are UTF-8 encoded first; bytes pass through unchanged.
    """
    payload = message.encode('utf-8') if isinstance(message, str) else message
    opcode = OPCODE_BINARY if binary else OPCODE_TEXT
    return _make_frame(payload, opcode)
def S_star(u, dfs_data):
    """Return S(u) extended with u itself.

    The set of all descendants of u, with u added.  Matches the original
    in-place behavior: the list returned by S is appended to (and returned)
    when u is not already present.
    """
    descendants = S(u, dfs_data)
    if u not in descendants:
        descendants.append(u)
    return descendants
### Input:
The set of all descendants of u, with u added.
### Response:
def S_star(u, dfs_data):
    """Return S(u) extended with u itself.

    The set of all descendants of u, with u added.  Matches the original
    in-place behavior: the list returned by S is appended to (and returned)
    when u is not already present.
    """
    descendants = S(u, dfs_data)
    if u not in descendants:
        descendants.append(u)
    return descendants
def detect(self, G):
    """Detect a single core-periphery pair.

    Stores the detected pair in ``c_``/``x_`` and the quality score in
    ``Q_`` (mirrored in ``qs_``).

    Parameters
    ----------
    G : NetworkX graph object

    Examples
    --------
    >>> import networkx as nx
    >>> import cpalgorithm as cpa
    >>> G = nx.karate_club_graph()  # load the karate club network.
    >>> lrc = cpa.LowRankCore()
    >>> lrc.detect(G)
    """
    coreness, pair = self._low_rank_core(G)
    self.c_ = coreness
    self.x_ = pair
    self.Q_ = self._score(G, coreness, pair)
    self.qs_ = self.Q_
Parameters
----------
G : NetworkX graph object
Examples
--------
>>> import networkx as nx
>>> import cpalgorithm as cpa
>>> G = nx.karate_club_graph() # load the karate club network.
>>> lrc = cp.LowRankCore()
>>> lrc.detect(G) | Below is the the instruction that describes the task:
### Input:
Detect a single core-periphery pair.
Parameters
----------
G : NetworkX graph object
Examples
--------
>>> import networkx as nx
>>> import cpalgorithm as cpa
>>> G = nx.karate_club_graph() # load the karate club network.
>>> lrc = cp.LowRankCore()
>>> lrc.detect(G)
### Response:
def detect(self, G):
    """Detect a single core-periphery pair.

    Stores the detected pair in ``c_``/``x_`` and the quality score in
    ``Q_`` (mirrored in ``qs_``).

    Parameters
    ----------
    G : NetworkX graph object

    Examples
    --------
    >>> import networkx as nx
    >>> import cpalgorithm as cpa
    >>> G = nx.karate_club_graph()  # load the karate club network.
    >>> lrc = cpa.LowRankCore()
    >>> lrc.detect(G)
    """
    coreness, pair = self._low_rank_core(G)
    self.c_ = coreness
    self.x_ = pair
    self.Q_ = self._score(G, coreness, pair)
    self.qs_ = self.Q_
def sync_in_records(self, force=False):
    """Synchronize from file records into bundle objects.

    Runs ``record_to_objects()`` for every build source file, then drives
    the metadata file once more before committing.

    :param force: accepted for interface compatibility; not consulted
        anywhere in this body -- TODO confirm whether callers expect it
        to force a re-sync.
    """
    self.log('---- Sync Files ----')
    # Drive every file record into its corresponding objects.
    for f in self.build_source_files:
        f.record_to_objects()
    # Only the metadata needs to be driven to the objects, since the other files are used as code,
    # directly from the file record.
    # NOTE(review): the loop above already called record_to_objects() on
    # every file, so the META file is processed a second time here --
    # presumably intentional, but worth confirming.
    self.build_source_files.file(File.BSFILE.META).record_to_objects()
self.commit() | Synchronize from files to records | Below is the the instruction that describes the task:
### Input:
Synchronize from files to records
### Response:
def sync_in_records(self, force=False):
    """Synchronize from file records into bundle objects.

    Runs ``record_to_objects()`` for every build source file, then drives
    the metadata file once more before committing.

    :param force: accepted for interface compatibility; not consulted
        anywhere in this body -- TODO confirm whether callers expect it
        to force a re-sync.
    """
    self.log('---- Sync Files ----')
    # Drive every file record into its corresponding objects.
    for f in self.build_source_files:
        f.record_to_objects()
    # Only the metadata needs to be driven to the objects, since the other files are used as code,
    # directly from the file record.
    # NOTE(review): the loop above already called record_to_objects() on
    # every file, so the META file is processed a second time here --
    # presumably intentional, but worth confirming.
    self.build_source_files.file(File.BSFILE.META).record_to_objects()
self.commit() |
def set_class_weight(self, class_weight='auto', y=None):
    """Set the classifier's class_weight parameter to match *y*.

    With ``class_weight=None`` the classifier's class_weight is cleared;
    with ``'auto'`` a weight dict is computed from the label counts in
    ``y``.  Classifiers without class_weight support are tolerated:
    ``None`` is ignored silently, ``'auto'`` emits a warning.  Any other
    value is a no-op.
    """
    if class_weight is None:
        try:
            self.clf.set_params(class_weight=None)
        except ValueError:
            # Classifier does not accept class_weight; leave it untouched.
            pass
    elif class_weight == 'auto':
        counts = np.bincount(y)
        present = np.nonzero(counts)[0]
        fractions = counts / float(counts.sum())
        # NOTE: labels are paired with the fractions in reverse order,
        # exactly as in the original implementation.
        weights = dict(zip(present[::-1], fractions[present]))
        try:
            self.clf.set_params(class_weight=weights)
        except ValueError:
            import warnings
            warnings.warn(
                "Tried to set class_weight, but failed. The classifier "
                "probably doesn't support it")
### Input:
Sets the class_weight of the classifier to match y
### Response:
def set_class_weight(self, class_weight='auto', y=None):
    """Set the classifier's class_weight parameter to match *y*.

    With ``class_weight=None`` the classifier's class_weight is cleared;
    with ``'auto'`` a weight dict is computed from the label counts in
    ``y``.  Classifiers without class_weight support are tolerated:
    ``None`` is ignored silently, ``'auto'`` emits a warning.  Any other
    value is a no-op.
    """
    if class_weight is None:
        try:
            self.clf.set_params(class_weight=None)
        except ValueError:
            # Classifier does not accept class_weight; leave it untouched.
            pass
    elif class_weight == 'auto':
        counts = np.bincount(y)
        present = np.nonzero(counts)[0]
        fractions = counts / float(counts.sum())
        # NOTE: labels are paired with the fractions in reverse order,
        # exactly as in the original implementation.
        weights = dict(zip(present[::-1], fractions[present]))
        try:
            self.clf.set_params(class_weight=weights)
        except ValueError:
            import warnings
            warnings.warn(
                "Tried to set class_weight, but failed. The classifier "
                "probably doesn't support it")
def solve_max(self, expr):
    """
    Solves a symbolic :class:`~manticore.core.smtlib.expression.Expression` into
    its maximum solution

    :param manticore.core.smtlib.Expression expr: Symbolic value to solve
    :return: Concrete value
    :rtype: int
    """
    if isinstance(expr, int):
        # Already concrete; nothing to solve.
        return expr
    migrated = self.migrate_expression(expr)
    return self._solver.max(self._constraints, migrated)
its maximum solution
:param manticore.core.smtlib.Expression expr: Symbolic value to solve
:return: Concrete value
:rtype: list[int] | Below is the the instruction that describes the task:
### Input:
Solves a symbolic :class:`~manticore.core.smtlib.expression.Expression` into
its maximum solution
:param manticore.core.smtlib.Expression expr: Symbolic value to solve
:return: Concrete value
:rtype: list[int]
### Response:
def solve_max(self, expr):
    """
    Solves a symbolic :class:`~manticore.core.smtlib.expression.Expression` into
    its maximum solution

    :param manticore.core.smtlib.Expression expr: Symbolic value to solve
    :return: Concrete value
    :rtype: int
    """
    if isinstance(expr, int):
        # Already concrete; nothing to solve.
        return expr
    migrated = self.migrate_expression(expr)
    return self._solver.max(self._constraints, migrated)
def to_json(self, sort_keys=False):
    """Serialize this SBP message to a JSON-encoded string.

    :param sort_keys: forwarded to :func:`json.dumps`.
    """
    return json.dumps(self.to_json_dict(), sort_keys=sort_keys)
### Input:
Produce a JSON-encoded SBP message.
### Response:
def to_json(self, sort_keys=False):
    """Serialize this SBP message to a JSON-encoded string.

    :param sort_keys: forwarded to :func:`json.dumps`.
    """
    return json.dumps(self.to_json_dict(), sort_keys=sort_keys)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.