_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q20300 | _Paragraph.runs | train | def runs(self):
"""
Immutable sequence of |_Run| objects corresponding to the runs in
this paragraph.
| python | {
"resource": ""
} |
q20301 | BaseShape.click_action | train | def click_action(self):
"""|ActionSetting| instance providing access to click behaviors.
Click behaviors are hyperlink-like behaviors including jumping to
a hyperlink (web page) or to another slide in the presentation. The
click action is that defined on the overall shape, not a run of text
within the | python | {
"resource": ""
} |
q20302 | _BaseShapes._next_shape_id | train | def _next_shape_id(self):
"""Return a unique shape id suitable for use with a new shape.
The returned id is 1 greater than the maximum shape id used so far.
In practice, the minimum id is 2 because the spTree element is always
assigned id="1".
"""
# ---presence of cached-max-shape-id | python | {
"resource": ""
} |
q20303 | _BaseGroupShapes.add_connector | train | def add_connector(self, connector_type, begin_x, begin_y, end_x, end_y):
"""Add a newly created connector shape to the end of this shape tree.
*connector_type* is a member of the :ref:`MsoConnectorType`
enumeration and the end-point values are specified as EMU values. The
returned connector is of type *connector_type* and has begin and end
| python | {
"resource": ""
} |
q20304 | _BaseGroupShapes.add_group_shape | train | def add_group_shape(self, shapes=[]):
"""Return a |GroupShape| object newly appended to this shape tree.
The group shape is empty and must be populated with shapes using
methods on its shape tree, available on its `.shapes` property. The
position and extents of the group shape are determined by the shapes
it contains; its position and extents are recalculated each time
| python | {
"resource": ""
} |
q20305 | _BaseGroupShapes.add_shape | train | def add_shape(self, autoshape_type_id, left, top, width, height):
"""Return new |Shape| object appended to this shape tree.
*autoshape_type_id* is a member of :ref:`MsoAutoShapeType` e.g.
``MSO_SHAPE.RECTANGLE`` specifying the type of shape to be added. The
remaining arguments specify the new shape's position and size.
"""
| python | {
"resource": ""
} |
q20306 | _BaseGroupShapes.add_textbox | train | def add_textbox(self, left, top, width, height):
"""Return newly added text box shape appended to this shape tree.
The text box is of the specified size, located at the specified
position on the slide.
"""
| python | {
"resource": ""
} |
q20307 | _BaseGroupShapes.build_freeform | train | def build_freeform(self, start_x=0, start_y=0, scale=1.0):
"""Return |FreeformBuilder| object to specify a freeform shape.
The optional *start_x* and *start_y* arguments specify the starting
pen position in local coordinates. They will be rounded to the
nearest integer before use and each default to zero.
The optional *scale* argument specifies the size of local coordinates
proportional to slide coordinates (EMU). If the vertical scale is
different than the horizontal scale (local coordinate units are
"rectangular"), a pair of numeric values can be provided as the
*scale* argument, e.g. `scale=(1.0, 2.0)`. In this case the first
number is interpreted as the horizontal (X) scale and the second | python | {
"resource": ""
} |
q20308 | SlideShapes.title | train | def title(self):
"""
The title placeholder shape on the slide or |None| if the slide has
no title placeholder.
| python | {
"resource": ""
} |
q20309 | _MoviePicElementCreator._poster_frame_rId | train | def _poster_frame_rId(self):
"""Return the rId of relationship to poster frame image.
The poster frame is the image used to represent the video | python | {
"resource": ""
} |
q20310 | _MoviePicElementCreator._video_part_rIds | train | def _video_part_rIds(self):
"""Return the rIds for relationships to media part for video.
This is where the media part and its relationships to | python | {
"resource": ""
} |
q20311 | Shape.shape_type | train | def shape_type(self):
"""
Unique integer identifying the type of this shape, like
``MSO_SHAPE_TYPE.TEXT_BOX``.
"""
if self.is_placeholder:
return MSO_SHAPE_TYPE.PLACEHOLDER
if self._sp.has_custom_geometry:
return MSO_SHAPE_TYPE.FREEFORM
if self._sp.is_autoshape:
| python | {
"resource": ""
} |
q20312 | Package.next_media_partname | train | def next_media_partname(self, ext):
"""Return |PackURI| instance for next available media partname.
Partname is first available, starting at sequence number 1. Empty
sequence numbers are reused. *ext* is used as the extension on the
returned partname.
"""
def first_available_media_idx():
media_idxs = sorted([
part.partname.idx for part in self.iter_parts()
if part.partname.startswith('/ppt/media/media')
])
| python | {
"resource": ""
} |
q20313 | _ImageParts._find_by_sha1 | train | def _find_by_sha1(self, sha1):
"""
Return an |ImagePart| object belonging to this package or |None| if
no matching image part is found. The image part is identified by the
SHA1 hash digest of the image binary it contains.
"""
for image_part in self:
| python | {
"resource": ""
} |
q20314 | NotesMasterPart.create_default | train | def create_default(cls, package):
"""
Create and return a default notes master part, including creating the
new theme it requires. | python | {
"resource": ""
} |
q20315 | NotesMasterPart._new_theme_part | train | def _new_theme_part(cls, package):
"""
Create and return a default theme part suitable for use with a notes
master.
"""
partname = package.next_partname('/ppt/theme/theme%d.xml')
| python | {
"resource": ""
} |
q20316 | SlidePart.get_or_add_video_media_part | train | def get_or_add_video_media_part(self, video):
"""Return rIds for media and video relationships to media part.
A new |MediaPart| object is created if it does not already exist
(such as would occur if the same video appeared more than once in
a presentation). Two relationships to the media part are created,
one each with MEDIA and VIDEO relationship types. The need for two
appears to | python | {
"resource": ""
} |
q20317 | SlidePart.notes_slide | train | def notes_slide(self):
"""
The |NotesSlide| instance associated with this slide. If the slide
does not have a notes slide, a new one is created. The same single
instance is returned on each call.
"""
try:
| python | {
"resource": ""
} |
q20318 | SlidePart._add_notes_slide_part | train | def _add_notes_slide_part(self):
"""
Return a newly created |NotesSlidePart| object related to this slide
part. Caller is responsible for ensuring this slide doesn't already
have a notes slide part.
"""
notes_slide_part | python | {
"resource": ""
} |
q20319 | _BaseWorkbookWriter.xlsx_blob | train | def xlsx_blob(self):
"""
Return the byte stream of an Excel file formatted as chart data for
the category chart specified in the chart data | python | {
"resource": ""
} |
q20320 | CategoryWorkbookWriter._series_col_letter | train | def _series_col_letter(self, series):
"""
The letter of the Excel worksheet column in which the data for | python | {
"resource": ""
} |
q20321 | _BasePlot.data_labels | train | def data_labels(self):
"""
|DataLabels| instance providing properties and methods on the
collection of data labels associated with this plot.
"""
dLbls = self._element.dLbls
if dLbls is None:
| python | {
"resource": ""
} |
q20322 | Chart.font | train | def font(self):
"""Font object controlling text format defaults for this chart."""
defRPr = (
self._chartSpace
.get_or_add_txPr()
.p_lst[0]
| python | {
"resource": ""
} |
q20323 | Chart.legend | train | def legend(self):
"""
A |Legend| object providing access to the properties of the legend
for this chart.
"""
| python | {
"resource": ""
} |
q20324 | Chart.value_axis | train | def value_axis(self):
"""
The |ValueAxis| object providing access to properties of the value
axis of this chart. Raises |ValueError| if the chart has no value
axis.
"""
valAx_lst = self._chartSpace.valAx_lst
| python | {
"resource": ""
} |
q20325 | CT_TextBodyProperties.autofit | train | def autofit(self):
"""
The autofit setting for the text frame, a member of the
``MSO_AUTO_SIZE`` enumeration.
"""
if self.noAutofit is not None:
return MSO_AUTO_SIZE.NONE
if self.normAutofit is not None:
| python | {
"resource": ""
} |
q20326 | Categories.depth | train | def depth(self):
"""
The number of hierarchy levels in this category graph. Returns 0 if
it contains no categories.
"""
categories = self._categories
if not categories:
return 0
first_depth = categories[0].depth | python | {
"resource": ""
} |
q20327 | Category.depth | train | def depth(self):
"""
The number of hierarchy levels rooted at this category node. Returns
1 if this category has no sub-categories.
"""
sub_categories = self._sub_categories
if not sub_categories:
return 1
first_depth = sub_categories[0].depth | python | {
"resource": ""
} |
q20328 | Category.leaf_count | train | def leaf_count(self):
"""
The number of leaf category nodes under this category. Returns
1 if this category has | python | {
"resource": ""
} |
q20329 | CT_SlideIdList._next_id | train | def _next_id(self):
"""
Return the next available slide ID as an int. Valid slide IDs start
at 256. The next integer value greater than the max value in use is
chosen, which minimizes that chance of reusing the id of a deleted
slide.
| python | {
"resource": ""
} |
q20330 | _default_pptx_path | train | def _default_pptx_path():
"""
Return the path to the built-in default .pptx package.
"""
| python | {
"resource": ""
} |
q20331 | API.__get_url | train | def __get_url(self, endpoint):
""" Get URL for requests """
url = self.url
api = "wc-api"
if url.endswith("/") is False:
url = "%s/" % url
if self.wp_api:
| python | {
"resource": ""
} |
q20332 | API.__get_oauth_url | train | def __get_oauth_url(self, url, method, **kwargs):
""" Generate oAuth1.0a URL """
oauth = OAuth(
url=url,
consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret,
version=self.version,
| python | {
"resource": ""
} |
q20333 | OAuth.get_oauth_url | train | def get_oauth_url(self):
""" Returns the URL with OAuth params """
params = OrderedDict()
if "?" in self.url:
url = self.url[:self.url.find("?")]
for key, value in parse_qsl(urlparse(self.url).query):
params[key] = value
| python | {
"resource": ""
} |
q20334 | OAuth.generate_oauth_signature | train | def generate_oauth_signature(self, params, url):
""" Generate OAuth Signature """
if "oauth_signature" in params.keys():
del params["oauth_signature"]
base_request_uri = quote(url, "")
params = self.sorted_params(params)
params = self.normalize_parameters(params)
query_params = ["{param_key}%3D{param_value}".format(param_key=key, param_value=value)
for key, value in params.items()]
query_string = "%26".join(query_params)
string_to_sign = "%s&%s&%s" % (self.method, base_request_uri, query_string)
consumer_secret = str(self.consumer_secret)
| python | {
"resource": ""
} |
q20335 | OAuth.generate_nonce | train | def generate_nonce():
""" Generate nonce number """
nonce = ''.join([str(randint(0, 9)) for i in range(8)])
return HMAC(
| python | {
"resource": ""
} |
q20336 | print_ldamodel_distribution | train | def print_ldamodel_distribution(distrib, row_labels, val_labels, top_n=10):
"""
Print `n_top` top values from a LDA model's distribution `distrib`. Can be used for topic-word distributions and
document-topic distributions.
"""
df_values = top_n_from_distribution(distrib, top_n=top_n, row_labels=row_labels, val_labels=None)
df_labels = top_n_from_distribution(distrib, top_n=top_n, row_labels=row_labels, val_labels=val_labels)
| python | {
"resource": ""
} |
q20337 | print_ldamodel_topic_words | train | def print_ldamodel_topic_words(topic_word_distrib, vocab, n_top=10, row_labels=DEFAULT_TOPIC_NAME_FMT):
"""Print `n_top` values from a LDA model's topic-word distributions."""
| python | {
"resource": ""
} |
q20338 | print_ldamodel_doc_topics | train | def print_ldamodel_doc_topics(doc_topic_distrib, doc_labels, n_top=3, val_labels=DEFAULT_TOPIC_NAME_FMT):
"""Print `n_top` values from a LDA model's document-topic distributions."""
| python | {
"resource": ""
} |
q20339 | save_ldamodel_to_pickle | train | def save_ldamodel_to_pickle(picklefile, model, vocab, doc_labels, dtm=None, **kwargs):
"""Save a LDA model as pickle file."""
| python | {
"resource": ""
} |
q20340 | plot_heatmap | train | def plot_heatmap(fig, ax, data,
xaxislabel=None, yaxislabel=None,
xticklabels=None, yticklabels=None,
title=None, grid=True,
values_in_cells=True, round_values_in_cells=2,
legend=False,
fontsize_axislabel=None,
fontsize_axisticks=None,
fontsize_cell_values=None):
""""
helper function to plot a heatmap for a 2D matrix `data` using matplotlib's "matshow" function
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
if data.ndim != 2:
raise ValueError('`data` must be a 2D matrix/array')
# draw basic heatmap
cax = ax.matshow(data)
# draw legend
if legend:
fig.colorbar(cax)
# set title
if title:
ax.set_title(title, y=1.25)
n_rows, n_cols = data.shape
# draw values in cells
if values_in_cells:
textcol_thresh = data.min() + (data.max() - data.min()) / 2
x_indices, y_indices = np.meshgrid(np.arange(n_cols), np.arange(n_rows))
for x, y in zip(x_indices.flatten(), y_indices.flatten()):
val = data[y, x]
# lower values get white text color for better visibility
textcol = 'white' if val < textcol_thresh else 'black'
disp_val = round(val, round_values_in_cells) if round_values_in_cells is not None else val
ax.text(x, y, disp_val, va='center', ha='center', color=textcol, fontsize=fontsize_cell_values)
# customize axes
if xaxislabel: | python | {
"resource": ""
} |
q20341 | get_term_proportions | train | def get_term_proportions(dtm):
"""
Return the term proportions given the document-term matrix `dtm`
"""
unnorm = get_term_frequencies(dtm)
if unnorm.sum() == 0:
| python | {
"resource": ""
} |
q20342 | TMPreproc._setup_workers | train | def _setup_workers(self, initial_states=None):
"""
Create worker processes and queues. Distribute the work evenly across worker processes. Optionally
send initial states defined in list `initial_states` to each worker process.
"""
if initial_states is not None:
require_listlike(initial_states)
self.tasks_queues = []
self.results_queue = mp.Queue()
self.workers = []
common_kwargs = dict(tokenizer=self.tokenizer,
stemmer=self.stemmer,
lemmata_dict=self.lemmata_dict,
pos_tagger=self.pos_tagger)
if initial_states is not None:
logger.info('setting up %d worker processes with initial states' % len(initial_states))
for i_worker, w_state in enumerate(initial_states):
task_q = mp.JoinableQueue()
w = _PreprocWorker(i_worker, w_state.pop('docs'), self.language, task_q, self.results_queue,
name='_PreprocWorker#%d' % i_worker, **common_kwargs)
w.start()
task_q.put(('set_state', w_state))
self.workers.append(w)
self.tasks_queues.append(task_q)
[q.join() for q in self.tasks_queues]
else:
| python | {
"resource": ""
} |
q20343 | _words_by_score | train | def _words_by_score(words, score, least_to_most, n=None):
"""
Order a vector of `words` by a `score`, either `least_to_most` or reverse. Optionally return only the top `n`
results.
"""
if words.shape != score.shape:
raise ValueError('`words` and `score` must have the same shape')
if n is not None and (n <= 0 or n > len(words)):
raise ValueError('`n` must | python | {
"resource": ""
} |
q20344 | _words_by_salience_score | train | def _words_by_salience_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None, least_to_most=False):
"""Return words in `vocab` ordered by saliency score."""
| python | {
"resource": ""
} |
q20345 | _words_by_distinctiveness_score | train | def _words_by_distinctiveness_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None,
least_to_most=False):
"""Return words in `vocab` ordered by distinctiveness score."""
p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths)
| python | {
"resource": ""
} |
q20346 | pickle_data | train | def pickle_data(data, picklefile):
"""Helper function to pickle `data` | python | {
"resource": ""
} |
q20347 | unpickle_file | train | def unpickle_file(picklefile, **kwargs):
"""Helper function to unpickle | python | {
"resource": ""
} |
q20348 | dyndoc_insert | train | def dyndoc_insert(src):
"""docstring_insert - a decorator to insert API-docparts dynamically."""
# manipulating docstrings this way is tricky due to indentation
# the JSON needs leading whitespace to be interpreted correctly
import json
import re
def mkblock(d, flag=0):
# response, pretty formatted
v = json.dumps(d, indent=2)
if flag == 1:
# strip the '[' and ']' in case of a list holding items
# that stand on their own (example: tick records from a stream)
nw = re.findall('.*?\[(.*)\]', v, flags=re.S)
v = nw[0]
# add leading whitespace for each line and start with a newline
return "\n{}".format("".join(["{0:>16}{1}\n".format("", L)
for L in v.split('\n')]))
def dec(obj):
allSlots = re.findall("\{(_v3.*?)\}", obj.__doc__)
docsub = {}
sub = {}
for k in allSlots:
p = re.findall("^(_v3.*)_(.*)", k)
p = list(*p)
sub.update({p[1]: p[0]})
for v in sub.values():
| python | {
"resource": ""
} |
q20349 | endpoint | train | def endpoint(url, method="GET", expected_status=200):
"""endpoint - decorator to manipulate the REST-service endpoint.
The endpoint decorator sets the endpoint and | python | {
"resource": ""
} |
q20350 | abstractclass | train | def abstractclass(cls):
"""abstractclass - class decorator.
make sure the class is abstract and cannot be used on it's own.
@abstractclass
class A(object):
def __init__(self, *args, **kwargs):
# logic
pass
class B(A):
pass
a = A() # results in an AssertionError
b = B() # works fine
"""
setattr(cls, "_ISNEVER", cls.__bases__[0].__name__)
origInit = cls.__dict__["__init__"]
def wrapInit(self, *args, **kwargs):
# when the class is instantiated we can check for bases
# we don't want it to be the base class
try:
| python | {
"resource": ""
} |
q20351 | granularity_to_time | train | def granularity_to_time(s):
"""convert a named granularity into seconds.
get value in seconds for named granularities: M1, M5 ... H1 etc.
>>> print(granularity_to_time("M5"))
300
"""
mfact = {
'S': 1,
'M': 60,
'H': 3600,
'D': 86400,
'W': 604800,
}
try:
| python | {
"resource": ""
} |
q20352 | get_classes | train | def get_classes(modName):
"""return a list of all classes in a module."""
classNames = []
| python | {
"resource": ""
} |
q20353 | InstrumentsCandlesFactory | train | def InstrumentsCandlesFactory(instrument, params=None):
"""InstrumentsCandlesFactory - generate InstrumentCandles requests.
InstrumentsCandlesFactory is used to retrieve historical data by
automatically generating consecutive requests when the OANDA limit
of *count* records is exceeded.
This is known by calculating the number of candles between *from* and
*to*. If *to* is not specified *to* will be equal to *now*.
The *count* parameter is only used to control the number of records to
retrieve in a single request.
The *includeFirst* parameter is forced to make sure that results do
no have a 1-record gap between consecutive requests.
Parameters
----------
instrument : string (required)
the instrument to create the order for
params: params (optional)
the parameters to specify the historical range,
see the REST-V20 docs regarding 'instrument' at developer.oanda.com
If no params are specified, just a single InstrumentsCandles request
will be generated acting the same as if you had just created it
directly.
Example
-------
The *oandapyV20.API* client processes requests as objects. So,
downloading large historical batches simply comes down to:
>>> import json
>>> from oandapyV20 import API
>>> from oandapyV20.contrib.factories import InstrumentsCandlesFactory
>>>
>>> client = API(access_token=...)
>>> instrument, granularity = "EUR_USD", "M15"
>>> _from = "2017-01-01T00:00:00Z"
>>> params = {
... "from": _from,
... "granularity": granularity,
... "count": 2500,
... }
>>> with open("/tmp/{}.{}".format(instrument, granularity), "w") as OUT:
>>> # The factory returns a generator generating consecutive
>>> # requests to retrieve full history from date 'from' till 'to'
>>> for r in InstrumentsCandlesFactory(instrument=instrument,
... params=params)
>>> client.request(r)
>>> OUT.write(json.dumps(r.response.get('candles'), indent=2))
.. note:: Normally you can't combine *from*, *to* and *count*.
When *count* specified, it is used to calculate the gap between
*to* and *from*. The *params* passed to the generated request
itself does contain the *count* parameter.
"""
RFC3339 = "%Y-%m-%dT%H:%M:%SZ"
# if not specified use the default of 'S5' as OANDA does
gs = granularity_to_time(params.get('granularity', 'S5'))
_from = None
_epoch_from = None
if 'from' in params:
_from = datetime.strptime(params.get('from'), RFC3339)
_epoch_from = int(calendar.timegm(_from.timetuple()))
_to = datetime.utcnow()
if 'to' in params: | python | {
"resource": ""
} |
q20354 | API.request | train | def request(self, endpoint):
"""Perform a request for the APIRequest instance 'endpoint'.
Parameters
----------
endpoint : APIRequest
The endpoint parameter contains an instance of an APIRequest
containing the endpoint, method and optionally other parameters
or body data.
Raises
------
V20Error in case of HTTP response code >= 400
"""
method = endpoint.method
method = method.lower()
params = None
try:
params = getattr(endpoint, "params")
except AttributeError:
# request does not have params
params = {}
headers = {}
if hasattr(endpoint, "HEADERS"):
headers = getattr(endpoint, "HEADERS")
request_args = {}
if method == 'get':
request_args['params'] = params
elif hasattr(endpoint, "data") and endpoint.data:
request_args['json'] = endpoint.data
# if any parameter for request then merge them
request_args.update(self._request_params)
# which API to access ?
if not (hasattr(endpoint, "STREAM") and
getattr(endpoint, "STREAM") is True):
url = "{}/{}".format(
TRADING_ENVIRONMENTS[self.environment]["api"],
| python | {
"resource": ""
} |
q20355 | make_definition_classes | train | def make_definition_classes(mod):
"""Dynamically create the definition classes from module 'mod'."""
rootpath = "oandapyV20"
PTH = "{}.definitions.{}".format(rootpath, mod)
M = import_module(PTH)
__ALL__ = [] # construct the __all__ variable
for cls, cldef in M.definitions.items():
orig, fiV = next(six.iteritems(cldef))
fiK = orig.replace('-', '_')
# create the docstring dynamically
clsdoc = dyndoc.format(cls=cls,
PTH=PTH,
mod=mod,
firstItem=fiK, orig=orig,
firstItemVal=fiV)
# Since we can't change the docstring afterwards (it's readonly)
# figure this out before and not during ...
for K, V in cldef.items():
attrName = K
if "-" in K:
attrName = K.replace('-', '_')
adoc = _doc.format(K, attrName, K)
| python | {
"resource": ""
} |
q20356 | _fix_integrity_error | train | def _fix_integrity_error(f):
"""Ensure raising of IntegrityError on unique constraint violations.
In earlier versions of hdbcli it doesn't raise the hdbcli.dbapi.IntegrityError
exception for unique constraint violations. To support also older versions
of hdbcli this decorator inspects the raised exception and | python | {
"resource": ""
} |
q20357 | pp_xml | train | def pp_xml(body):
"""Pretty print format some XML so it's readable."""
| python | {
"resource": ""
} |
q20358 | Client.power_on | train | def power_on(self):
"""Power on the box."""
payload = amt.wsman.power_state_request(self.uri, "on")
| python | {
"resource": ""
} |
q20359 | Client.set_next_boot | train | def set_next_boot(self, boot_device):
"""Sets the machine to boot to boot_device on its next reboot
Will default back to normal boot list on the reboot that follows.
"""
| python | {
"resource": ""
} |
q20360 | dumps | train | def dumps(obj, **kwargs):
"""Serialize ``obj`` to a JSON5-formatted ``str``."""
t = type(obj)
if obj is True:
return u'true'
elif obj is False:
return u'false'
elif obj == None:
return u'null'
elif t == type('') or t == type(u''):
single = "'" in obj
double = '"' in obj
if single and double:
return json.dumps(obj)
elif single:
return '"' + obj + '"'
else:
| python | {
"resource": ""
} |
q20361 | Connection.destroy | train | def destroy(self):
"""Close the connection, and close any associated
CBS authentication session.
"""
try:
| python | {
"resource": ""
} |
q20362 | Connection.work | train | def work(self):
"""Perform a single Connection iteration."""
try:
raise self._error
except TypeError:
pass
except Exception as e:
_logger.warning("%r", e)
raise
try:
self.lock() | python | {
"resource": ""
} |
q20363 | MessageSender._detach_received | train | def _detach_received(self, error):
"""Callback called when a link DETACH frame is received.
This callback will process the received DETACH error to determine if
the link is recoverable or whether it should be shutdown.
:param error: The error information from the detach
frame.
:type error: ~uamqp.errors.ErrorResponse
"""
# pylint: disable=protected-access
if error:
condition = error.condition
description = error.description
info = error.info
else:
| python | {
"resource": ""
} |
q20364 | MessageSender.get_state | train | def get_state(self):
"""Get the state of the MessageSender and its underlying Link.
:rtype: ~uamqp.constants.MessageSenderState
"""
try:
raise self._error
| python | {
"resource": ""
} |
q20365 | AMQPClientAsync.open_async | train | async def open_async(self, connection=None):
"""Asynchronously open the client. The client can create a new Connection
or an existing Connection can be passed in. This existing Connection
may have an existing CBS authentication Session, which will be
used for this client as well. Otherwise a new Session will be
created.
:param connection: An existing Connection that may be shared between
multiple clients.
:type connetion: ~uamqp.async_ops.connection_async.ConnectionAsync
"""
# pylint: disable=protected-access
if self._session:
return # already open
if connection:
_logger.info("Using existing connection.")
self._auth = connection.auth
self._ext_connection = True
self._connection = connection or self.connection_type(
self._hostname,
self._auth,
container_id=self._name,
max_frame_size=self._max_frame_size,
channel_max=self._channel_max,
idle_timeout=self._idle_timeout,
properties=self._properties,
remote_idle_timeout_empty_frame_send_ratio=self._remote_idle_timeout_empty_frame_send_ratio,
error_policy=self._error_policy,
debug=self._debug_trace,
loop=self.loop)
if not self._connection.cbs and isinstance(self._auth, authentication.CBSAsyncAuthMixin):
self._connection.cbs = await asyncio.shield(self._auth.create_authenticator_async(
self._connection,
| python | {
"resource": ""
} |
q20366 | AMQPClientAsync.close_async | train | async def close_async(self):
"""Close the client asynchronously. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
"""
if self.message_handler:
await self.message_handler.destroy_async()
self.message_handler = None
self._shutdown = True
if self._keep_alive_thread:
await self._keep_alive_thread
self._keep_alive_thread = None
if not self._session:
return # already closed.
if not self._connection.cbs:
_logger.info("Closing non-CBS session.")
| python | {
"resource": ""
} |
q20367 | AMQPClientAsync.do_work_async | train | async def do_work_async(self):
"""Run a single connection iteration asynchronously.
This will return `True` if the connection is still open
and ready to be used for further work, or `False` if it needs
to be shut down.
:rtype: bool
:raises: TimeoutError or ~uamqp.errors.ClientTimeout if | python | {
"resource": ""
} |
q20368 | SendClientAsync.send_message_async | train | async def send_message_async(self, messages, close_on_done=False):
"""Send a single message or batched message asynchronously.
:param messages: A message to send. This can either be a single instance
of ~uamqp.message.Message, or multiple messages wrapped in an instance
of ~uamqp.message.BatchMessage.
:type message: ~uamqp.message.Message
:param close_on_done: Close the client once the message is sent. Default is `False`.
:type close_on_done: bool
:raises: ~uamqp.errors.MessageException if message fails to send after retry policy
is exhausted.
"""
batch = messages.gather()
pending_batch = []
for message in batch:
message.idle_time = self._counter.get_current_ms()
async with self._pending_messages_lock:
self._pending_messages.append(message)
pending_batch.append(message)
await self.open_async()
try:
while any([m for m in pending_batch if m.state not in constants.DONE_STATES]):
await self.do_work_async()
failed = [m for m in pending_batch if m.state == constants.MessageState.SendFailed]
| python | {
"resource": ""
} |
q20369 | SendClientAsync.send_all_messages_async | train | async def send_all_messages_async(self, close_on_done=True):
"""Send all pending messages in the queue asynchronously.
This will return a list of the send result of all the pending
messages so it can be determined if any messages failed to send.
This function will open the client if it is not already open.
:param close_on_done: Close the client once the messages are sent.
Default is `True`.
:type close_on_done: bool
:rtype: list[~uamqp.constants.MessageState]
"""
await self.open_async()
try:
| python | {
"resource": ""
} |
q20370 | ReceiveClientAsync._client_ready_async | train | async def _client_ready_async(self):
"""Determine whether the client is ready to start receiving messages.
To be ready, the connection must be open and authentication complete,
The Session, Link and MessageReceiver must be open and in non-errored
states.
:rtype: bool
:raises: ~uamqp.errors.MessageHandlerError if the MessageReceiver
goes into an error state.
"""
# pylint: disable=protected-access
if not self.message_handler:
self.message_handler = self.receiver_type(
self._session, self._remote_address, self._name,
on_message_received=self._message_received,
name='receiver-link-{}'.format(uuid.uuid4()),
debug=self._debug_trace,
receive_settle_mode=self._receive_settle_mode,
prefetch=self._prefetch,
max_message_size=self._max_message_size,
properties=self._link_properties,
error_policy=self._error_policy,
encoding=self._encoding,
loop=self.loop) | python | {
"resource": ""
} |
q20371 | ReceiveClientAsync.receive_message_batch_async | train | async def receive_message_batch_async(self, max_batch_size=None, on_message_received=None, timeout=0):
"""Receive a batch of messages asynchronously. This method will return as soon as some
messages are available rather than waiting to achieve a specific batch size, and
therefore the number of messages returned per call will vary up to the maximum allowed.
If the receive client is configured with `auto_complete=True` then the messages received
in the batch returned by this function will already be settled. Alternatively, if
`auto_complete=False`, then each message will need to be explicitly settled before
it expires and is released.
:param max_batch_size: The maximum number of messages that can be returned in
one call. This value cannot be larger than the prefetch value, and if not specified,
the prefetch value will be used.
:type max_batch_size: int
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
:param timeout: I timeout in milliseconds for which to wait to receive any messages.
If no messages are received in this time, an empty list will be returned. If set to
| python | {
"resource": ""
} |
q20372 | ReceiveClientAsync.receive_messages_iter_async | train | def receive_messages_iter_async(self, on_message_received=None):
"""Receive messages by asynchronous generator. Messages returned in the
generator have already been accepted - if you wish to add logic to accept
or reject messages based on custom criteria, pass in a callback.
If the receive client is configured with `auto_complete=True` then the messages received
from the iterator returned by this function will be automatically settled when the iterator
is incremented. Alternatively, if `auto_complete=False`, then each message will need to
be explicitly settled before it expires and is released.
:param on_message_received: A callback to process messages as they arrive from the
service. It | python | {
"resource": ""
} |
q20373 | create_sas_token | train | def create_sas_token(key_name, shared_access_key, scope, expiry=timedelta(hours=1)):
"""Create a SAS token.
:param key_name: The username/key name/policy name for the token.
:type key_name: bytes
:param shared_access_key: The shared access key to generate the token from.
:type shared_access_key: bytes
:param scope: The token permissions scope.
:type scope: bytes
:param expiry: The lifetime of the generated token. Default is | python | {
"resource": ""
} |
q20374 | _convert_py_number | train | def _convert_py_number(value):
"""Convert a Python integer value into equivalent C object.
Will attempt to use the smallest possible conversion, starting with int, then long
then double.
"""
try:
return c_uamqp.int_value(value)
except OverflowError:
| python | {
"resource": ""
} |
q20375 | ConnectionAsync.work_async | train | async def work_async(self):
"""Perform a single Connection iteration asynchronously."""
try:
raise self._error
except TypeError:
pass
except Exception as e:
_logger.warning("%r", e)
raise
try:
await self.lock_async()
| python | {
"resource": ""
} |
q20376 | ConnectionAsync.destroy_async | train | async def destroy_async(self):
"""Close the connection asynchronously, and close any associated
CBS authentication session.
"""
try:
await self.lock_async()
_logger.debug("Unlocked connection %r to close.", self.container_id)
await self._close_async()
except asyncio.TimeoutError:
| python | {
"resource": ""
} |
q20377 | AMQPAuth.set_tlsio | train | def set_tlsio(self, hostname, port, http_proxy):
"""Setup the default underlying TLS IO layer. On Windows this is
Schannel, on Linux and MacOS this is OpenSSL.
:param hostname: The endpoint hostname.
:type hostname: bytes
:param port: The TLS port.
:type port: int
"""
_default_tlsio = c_uamqp.get_default_tlsio()
_tlsio_config = c_uamqp.TLSIOConfig()
_tlsio_config.hostname = hostname
_tlsio_config.port = int(port)
if http_proxy:
proxy_config = self._build_proxy_config(hostname, port, http_proxy)
_tlsio_config.set_proxy_config(proxy_config)
self._underlying_xio = c_uamqp.xio_from_tlsioconfig(_default_tlsio, _tlsio_config)
cert = self.cert_file or certifi.where()
| python | {
"resource": ""
} |
q20378 | AMQPAuth.close | train | def close(self):
"""Close the authentication layer and cleanup
all the authentication wrapper objects.
"""
| python | {
"resource": ""
} |
q20379 | Message.decode_from_bytes | train | def decode_from_bytes(cls, data):
"""Decode an AMQP message from a bytearray.
The returned message will not have a delivery context and
therefore will be considered to be | python | {
"resource": ""
} |
q20380 | Message._parse_message | train | def _parse_message(self, message):
"""Parse a message received from an AMQP service.
:param message: The received C message.
:type message: uamqp.c_uamqp.cMessage
"""
_logger.debug("Parsing received message %r.", self.delivery_no)
self._message = message
body_type = message.body_type
if body_type == c_uamqp.MessageBodyType.NoneType:
self._body = None
elif body_type == c_uamqp.MessageBodyType.DataType:
self._body = DataBody(self._message)
elif body_type == c_uamqp.MessageBodyType.SequenceType:
raise TypeError("Message body type Sequence not supported.")
else:
self._body = ValueBody(self._message)
_props = self._message.properties
if _props:
_logger.debug("Parsing received message properties %r.", self.delivery_no)
self.properties = MessageProperties(properties=_props, encoding=self._encoding)
_header = self._message.header
if _header:
_logger.debug("Parsing received message header %r.", self.delivery_no)
self.header = MessageHeader(header=_header)
_footer = self._message.footer
if _footer:
_logger.debug("Parsing received message footer %r.", self.delivery_no)
self.footer = _footer.map
_app_props = self._message.application_properties
| python | {
"resource": ""
} |
q20381 | Message.get_message_encoded_size | train | def get_message_encoded_size(self):
"""Pre-emptively get the size of the message once it has been encoded
to go over the wire so we can raise an error if the message will be
rejected for being too large.
This method is not available for messages | python | {
"resource": ""
} |
q20382 | Message.encode_message | train | def encode_message(self):
"""Encode message to AMQP wire-encoded bytearray.
:rtype: bytearray
"""
if not self._message:
raise ValueError("No message data to encode.")
cloned_data = self._message.clone()
| python | {
"resource": ""
} |
q20383 | Message.gather | train | def gather(self):
"""Return all the messages represented by this object.
This will always be a list of a single message.
:rtype: list[~uamqp.message.Message]
"""
if self.state in constants.RECEIVE_STATES:
raise TypeError("Only new messages can be gathered.")
if not self._message:
| python | {
"resource": ""
} |
q20384 | Message.get_message | train | def get_message(self):
"""Get the underlying C message from this object.
:rtype: uamqp.c_uamqp.cMessage
"""
if not self._message:
return None
| python | {
"resource": ""
} |
q20385 | Message.accept | train | def accept(self):
"""Send a response disposition to the service to indicate that
a received message has been accepted. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. Returns `True` if message was accepted, or `False` | python | {
"resource": ""
} |
q20386 | Message.reject | train | def reject(self, condition=None, description=None):
"""Send a response disposition to the service to indicate that
a received message has been rejected. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. A rejected message will increment the messages delivery count.
Returns `True` is message was rejected, or `False` if the message
was already settled.
:param condition: The AMQP rejection code. By default this is `amqp:internal-error`.
:type condition: bytes or str
:param description: A description/reason to accompany the rejection.
:type description: bytes or str
:rtype: bool
:raises: TypeError | python | {
"resource": ""
} |
q20387 | Message.release | train | def release(self):
"""Send a response disposition to the service to indicate that
a received message has been released. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. A released message will not incremenet the messages
delivery count. Returns `True` is message was released, or `False` if the message
was already settled.
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
| python | {
"resource": ""
} |
q20388 | Message.modify | train | def modify(self, failed, deliverable, annotations=None):
"""Send a response disposition to the service to indicate that
a received message has been modified. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
be ignored. Returns `True` is message was modified, or `False` if the message
was already settled.
:param failed: Whether this delivery of this message failed. This does not
indicate whether subsequence deliveries of this message would also fail.
:type failed: bool
:param deliverable: Whether this message will be deliverable to this client
on subsequent deliveries - i.e. whether delivery is retryable.
:type deliverable: bool
:param annotations: Annotations | python | {
"resource": ""
} |
q20389 | BatchMessage._create_batch_message | train | def _create_batch_message(self):
"""Create a ~uamqp.message.Message for a value supplied by the data
generator. Applies all properties and annotations to the message.
:rtype: ~uamqp.message.Message
| python | {
"resource": ""
} |
q20390 | BatchMessage._multi_message_generator | train | def _multi_message_generator(self):
"""Generate multiple ~uamqp.message.Message objects from a single data
stream that in total may exceed the maximum individual message size.
Data will be continuously added to a single message until that message
reaches a max allowable size, at which point it will be yielded and
a new message will be started.
:rtype: generator[~uamqp.message.Message]
"""
unappended_message_bytes = None
while True:
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
if unappended_message_bytes:
new_message._body.append(unappended_message_bytes) # pylint: disable=protected-access
body_size += len(unappended_message_bytes)
try:
for | python | {
"resource": ""
} |
q20391 | BatchMessage.gather | train | def gather(self):
"""Return all the messages represented by this object. This will convert
the batch data into individual Message objects, which may be one
or more if multi_messages is set to `True`.
:rtype: list[~uamqp.message.Message]
"""
if self._multi_messages:
return self._multi_message_generator()
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
for data in self._body_gen:
message_bytes = None
try:
| python | {
"resource": ""
} |
q20392 | DataBody.append | train | def append(self, data):
"""Append a section to the body.
:param data: The data to append.
:type data: str or bytes
"""
if isinstance(data, six.text_type):
| python | {
"resource": ""
} |
q20393 | ValueBody.set | train | def set(self, value):
"""Set a value as the message body. This can be any
Python data type and it will be automatically encoded
into an AMQP type. If a specific AMQP type is required, a
`types.AMQPType` can be used.
:param data: The data to send in the body. | python | {
"resource": ""
} |
q20394 | CBSAuthMixin.create_authenticator | train | def create_authenticator(self, connection, debug=False, **kwargs):
"""Create the AMQP session and the CBS channel with which
to negotiate the token.
:param connection: The underlying AMQP connection on which
to create the session.
:type connection: ~uamqp.connection.Connection
:param debug: Whether to emit network trace logging events for the
CBS session. Default is `False`. Logging events are set at INFO level.
:type debug: bool
:rtype: uamqp.c_uamqp.CBSTokenAuth
"""
self._connection = connection
self._session = Session(connection, **kwargs)
try:
self._cbs_auth = c_uamqp.CBSTokenAuth(
self.audience,
self.token_type,
self.token,
int(self.expires_at),
self._session._session, # pylint: disable=protected-access
| python | {
"resource": ""
} |
q20395 | CBSAuthMixin.close_authenticator | train | def close_authenticator(self):
"""Close the CBS auth channel and session."""
_logger.info("Shutting down CBS session on connection: %r.", self._connection.container_id)
try:
_logger.debug("Unlocked CBS to close on connection: %r.", self._connection.container_id)
self._cbs_auth.destroy()
_logger.info("Auth closed, destroying session on connection: %r.", | python | {
"resource": ""
} |
q20396 | SASTokenAuth.update_token | train | def update_token(self):
"""If a username and password are present - attempt to use them to
request a fresh SAS token.
"""
if not self.username or not self.password:
raise errors.TokenExpired("Unable to refresh token - no username or password.")
| python | {
"resource": ""
} |
q20397 | SASTokenAuth.from_shared_access_key | train | def from_shared_access_key(
cls,
uri,
key_name,
shared_access_key,
expiry=None,
port=constants.DEFAULT_AMQPS_PORT,
timeout=10,
retry_policy=TokenRetryPolicy(),
verify=None,
http_proxy=None,
encoding='UTF-8'):
"""Attempt to create a CBS token session using a Shared Access Key such
as is used to connect to Azure services.
:param uri: The AMQP endpoint URI. This must be provided as
a decoded string.
:type uri: str
:param key_name: The SAS token username, also referred to as the key
name or policy name.
:type key_name: str
:param shared_access_key: The SAS token password, also referred to as the key.
:type shared_access_key: str
:param expiry: The lifetime in seconds for the generated token. Default is 1 hour.
:type expiry: int
:param port: The TLS port - default for AMQP is 5671.
:type port: int
:param timeout: The timeout in seconds in which to negotiate the token.
The default value is 10 seconds.
:type timeout: int
:param retry_policy: The retry policy for the PUT token request. The default
retry policy has 3 retries.
:type retry_policy: ~uamqp.authentication.cbs_auth.TokenRetryPolicy
:param verify: The path to a user-defined certificate.
:type verify: str
:param http_proxy: HTTP proxy configuration. This should be a dictionary with
the following keys present: 'proxy_hostname' and 'proxy_port'. Additional optional
| python | {
"resource": ""
} |
q20398 | MessageReceiver._state_changed | train | def _state_changed(self, previous_state, new_state):
"""Callback called whenever the underlying Receiver undergoes a change
of state. This function wraps the states as Enums to prepare for
calling the public callback.
:param previous_state: The previous Receiver state.
:type previous_state: int
:param new_state: The new Receiver state.
:type new_state: int
"""
try:
try:
_previous_state = constants.MessageReceiverState(previous_state)
except ValueError:
_previous_state = previous_state
try:
_new_state = constants.MessageReceiverState(new_state)
except ValueError:
_new_state = new_state
if _previous_state == constants.MessageReceiverState.Opening \
and _new_state == constants.MessageReceiverState.Error:
_logger.info("Receiver link failed to open - expecting to receive DETACH frame.")
elif | python | {
"resource": ""
} |
q20399 | MessageReceiver._settle_message | train | def _settle_message(self, message_number, response):
"""Send a settle dispostition for a received message.
:param message_number: The delivery number of the message
to settle.
:type message_number: int
:param response: The type of disposition to respond with, e.g. whether
the message was accepted, rejected or abandoned.
:type response: ~uamqp.errors.MessageResponse
"""
if not response or isinstance(response, errors.MessageAlreadySettled):
return
if isinstance(response, errors.MessageAccepted):
self._receiver.settle_accepted_message(message_number)
elif isinstance(response, errors.MessageReleased):
self._receiver.settle_released_message(message_number)
elif isinstance(response, errors.MessageRejected):
self._receiver.settle_rejected_message(
| python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.