_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q7800 | NounPhraseChunker._getPOS | train | def _getPOS( self, token, onlyFirst = True ):
''' Returns POS of the current token.
'''
if onlyFirst:
| python | {
"resource": ""
} |
q7801 | divide | train | def divide(elements, by, translate=False, sep=' '):
"""Divide lists `elements` and `by`.
All elements are grouped into N bins, where N denotes the elements in `by` list.
Parameters
----------
elements: list of dict
Elements to be grouped into bins.
by: list of dict
Elements defining the bins.
translate: bool (default: False)
When dividing, also translate start and end positions of elements.
| python | {
"resource": ""
} |
q7802 | Disambiguator.__isListOfTexts | train | def __isListOfTexts(self, docs):
""" Checks whether the input is a list of strings or Text-s;
"""
| python | {
"resource": ""
} |
q7803 | WordnetTagger.tag_text | train | def tag_text(self, text, **kwargs):
"""Annotates `analysis` entries in `corpus` with a list of lemmas` synsets and queried WordNet data in a 'wordnet' entry.
Note
----
Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
Parameters
----------
text: estnltk.text.Text
Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
pos : boolean, optional
If True, annotates each synset with a correspnding `pos` (part-of-speech) tag.
variants : boolean, optional
If True, annotates each synset with a list of all its variants' (lemmas') literals.
var_sense : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its sense number.
var_definition : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
| python | {
"resource": ""
} |
q7804 | get_texts_and_labels | train | def get_texts_and_labels(sentence_chunk):
"""Given a sentence chunk, extract original texts and labels."""
words = sentence_chunk.split('\n')
texts = []
labels = []
for word in words:
word = word.strip()
if | python | {
"resource": ""
} |
q7805 | convert | train | def convert(document):
"""Convert a document to a Text object"""
raw_tokens = []
curpos = 0
text_spans = []
all_labels = []
sent_spans = []
word_texts = []
for sentence in document:
startpos = curpos
for idx, (text, label) in enumerate(sentence):
raw_tokens.append(text)
word_texts.append(text)
all_labels.append(label)
text_spans.append((curpos, curpos+len(text)))
curpos += len(text)
if idx < len(sentence) - 1:
raw_tokens.append(' ')
| python | {
"resource": ""
} |
q7806 | TransactionClass.select | train | def select(self, cb=None):
'''
Set this channel to use transactions.
'''
if not self._enabled:
self._enabled = True
| python | {
"resource": ""
} |
q7807 | TransactionClass.commit | train | def commit(self, cb=None):
'''
Commit the current transaction. Caller can specify a callback to use
when the transaction is committed.
'''
# Could call select() but spec 1.9.2.3 says to raise an exception
if not self.enabled:
raise self.TransactionsNotEnabled()
| python | {
"resource": ""
} |
q7808 | TransactionClass.rollback | train | def rollback(self, cb=None):
'''
Abandon all message publications and acks in the current transaction.
Caller can specify a callback to use when the transaction has been
aborted.
'''
# Could call select() but spec 1.9.2.5 says to raise an exception
if not self.enabled: | python | {
"resource": ""
} |
q7809 | Connection.synchronous | train | def synchronous(self):
'''
True if transport is synchronous or the connection has been forced
into synchronous mode, False otherwise.
'''
if self._transport is None:
if self._close_info and len(self._close_info['reply_text']) > 0:
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
| python | {
"resource": ""
} |
q7810 | Connection.connect | train | def connect(self, host, port):
'''
Connect to a host and port.
'''
# Clear the connect state immediately since we're no longer connected
# at this point.
self._connected = False
# Only after the socket has connected do we clear this state; closed
# must be False so that writes can be buffered in writePacket(). The
# closed state might have been set to True due to a socket error or a
# redirect.
self._host = "%s:%d" % (host, port)
self._closed = False
self._close_info = {
'reply_code': 0,
'reply_text': 'failed to connect to %s' % (self._host),
'class_id': 0,
'method_id': 0
}
self._transport.connect((host, port))
self._transport.write(PROTOCOL_HEADER)
self._last_octet_time = time.time()
if self._synchronous_connect:
# Have to queue this callback just after connect, it can't go
# into the constructor because the channel needs to be
# "always there" for frame processing, but the synchronous
# callback can't be added until after the protocol header has
# been written. This SHOULD be registered before the protocol
# header is written, in the case where the header bytes are
# written, but this thread/greenlet/context does not return until
# | python | {
"resource": ""
} |
q7811 | Connection.disconnect | train | def disconnect(self):
'''
Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway.
'''
self._connected = False
if self._transport is not None:
try:
| python | {
"resource": ""
} |
q7812 | Connection._next_channel_id | train | def _next_channel_id(self):
'''Return the next possible channel id. Is a circular enumeration.'''
self._channel_counter += 1
if | python | {
"resource": ""
} |
q7813 | Connection.channel | train | def channel(self, channel_id=None, synchronous=False):
"""
Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist. If channel_id is not
None but no channel exists for that id, will raise InvalidChannel. If
there are already too many channels open, will raise TooManyChannels.
If synchronous=True, then the channel will act synchronous in all cases
where a protocol method supports `nowait=False`, or where there is an
implied callback in the protocol.
"""
if channel_id is None:
# adjust for channel 0
if len(self._channels) - 1 >= self._channel_max:
raise Connection.TooManyChannels(
"%d channels already open, max %d",
len(self._channels) - 1,
self._channel_max)
channel_id = self._next_channel_id()
while channel_id in self._channels:
| python | {
"resource": ""
} |
q7814 | Connection.read_frames | train | def read_frames(self):
'''
Read frames from the transport and process them. Some transports may
choose to do this in the background, in several threads, and so on.
'''
# It's possible in a concurrent environment that our transport handle
# has gone away, so handle that cleanly.
# TODO: Consider moving this block into Translator base class. In many
# ways it belongs there. One of the problems though is that this is
# essentially the read loop. Each Transport has different rules for
# how to kick this off, and in the case of gevent, this is how a
# blocking call to read from the socket is kicked off.
if self._transport is None:
return
# Send a heartbeat (if needed)
self._channels[0].send_heartbeat()
data = self._transport.read(self._heartbeat)
current_time = time.time()
if data is None:
# Wait for 2 heartbeat intervals before giving up. See AMQP 4.2.7:
# "If a peer detects no incoming traffic (i.e. received octets) for two heartbeat intervals or longer,
# it should close the connection"
if self._heartbeat and (current_time-self._last_octet_time > 2*self._heartbeat):
msg = 'Heartbeats not received from %s for %d seconds' % (self._host, 2*self._heartbeat)
self.transport_closed(msg=msg)
raise ConnectionClosed('Connection is closed: ' + msg)
return
self._last_octet_time = current_time
reader = Reader(data)
p_channels = set()
try:
for frame in Frame.read_frames(reader):
if self._debug > 1:
self.logger.debug("READ: %s", frame)
self._frames_read += 1
ch = self.channel(frame.channel_id)
ch.buffer_frame(frame)
p_channels.add(ch)
except Frame.FrameError as e:
# Frame error in the peer, disconnect
self.close(reply_code=501,
| python | {
"resource": ""
} |
q7815 | Connection._flush_buffered_frames | train | def _flush_buffered_frames(self):
'''
Callback when protocol has been initialized on channel 0 and we're
ready to send out frames to set up any channels that have been
created.
'''
# In the rare case (a bug) where this is called but send_frame thinks
| python | {
"resource": ""
} |
q7816 | Connection.send_frame | train | def send_frame(self, frame):
'''
Send a single frame. If there is no transport or we're not connected
yet, append to the output buffer, else send immediately to the socket.
This is called from within the MethodFrames.
'''
if self._closed:
if self._close_info and len(self._close_info['reply_text']) > 0:
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
raise ConnectionClosed("connection is closed")
if self._transport is None or \
(not self._connected and frame.channel_id != 0):
self._output_frame_buffer.append(frame)
return
if self._debug > 1:
self.logger.debug("WRITE: %s", frame)
buf = bytearray()
frame.write_frame(buf)
if len(buf) > self._frame_max:
self.close(
| python | {
"resource": ""
} |
q7817 | ConnectionChannel.dispatch | train | def dispatch(self, frame):
'''
Override the default dispatch since we don't need the rest of
the stack.
'''
if frame.type() == HeartbeatFrame.type():
self.send_heartbeat()
elif frame.type() == MethodFrame.type():
if frame.class_id == 10:
cb = self._method_map.get(frame.method_id)
if cb:
method = self.clear_synchronous_cb(cb)
method(frame)
else:
raise Channel.InvalidMethod(
"unsupported method %d on channel %d",
frame.method_id, self.channel_id)
else:
| python | {
"resource": ""
} |
q7818 | ConnectionChannel.send_heartbeat | train | def send_heartbeat(self):
'''
Send a heartbeat if needed. Tracks last heartbeat send time.
'''
# Note that this does not take into account the time that we last
# sent a frame. Hearbeats are so small the effect should be quite
# limited. Also note that we're looking for something near to our
# scheduled interval, because if this is exact, then we'll likely
# actually send a heartbeat at twice the period, which could cause
# a broker to kill the connection if the period is large enough. The
# 90% bound is arbitrary but seems a sensible enough default.
| python | {
"resource": ""
} |
q7819 | ConnectionChannel._send_start_ok | train | def _send_start_ok(self):
'''Send the start_ok message.'''
args = Writer()
args.write_table(self.connection._properties)
args.write_shortstr(self.connection._login_method)
| python | {
"resource": ""
} |
q7820 | ExchangeClass._cleanup | train | def _cleanup(self):
'''
Cleanup local data.
'''
self._declare_cb = None
| python | {
"resource": ""
} |
q7821 | ExchangeClass.delete | train | def delete(self, exchange, if_unused=False, nowait=True, ticket=None,
cb=None):
'''
Delete an exchange.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
| python | {
"resource": ""
} |
q7822 | SocketTransport.connect | train | def connect(self, (host, port), klass=socket.socket):
'''Connect assuming a host and port tuple.
:param tuple: A tuple containing host and port for a connection.
:param klass: A implementation of socket.socket.
:raises socket.gaierror: If no address can be resolved.
:raises socket.error: If no connection can be made.
'''
self._host = "%s:%s" % (host, port)
for info in socket.getaddrinfo(host, port, 0, 0, socket.IPPROTO_TCP):
family, socktype, proto, _, sockaddr = info
| python | {
"resource": ""
} |
q7823 | SocketTransport.read | train | def read(self, timeout=None):
'''
Read from the transport. If timeout>0, will only block for `timeout`
seconds.
'''
e = None
if not hasattr(self, '_sock'):
return None
try:
# Note that we ignore both None and 0, i.e. we either block with a
# timeout or block completely and let gevent sort it out.
if timeout:
self._sock.settimeout(timeout)
else:
self._sock.settimeout(None)
data = self._sock.recv(
self._sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
if len(data):
if self.connection.debug > 1:
self.connection.logger.debug(
'read %d bytes from %s' % (len(data), self._host))
if len(self._buffer):
self._buffer.extend(data)
data = self._buffer
self._buffer = bytearray()
return data
| python | {
"resource": ""
} |
q7824 | BasicClass.set_return_listener | train | def set_return_listener(self, cb):
'''
Set a callback for basic.return listening. Will be called with a single
Message argument.
The return_info attribute of the Message will have the following
properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
RabbitMQ NOTE: if the channel was in confirmation mode when the message
was published, then basic.return will still be followed by basic.ack
| python | {
"resource": ""
} |
q7825 | BasicClass.qos | train | def qos(self, prefetch_size=0, prefetch_count=0, is_global=False):
'''
Set QoS on this channel.
'''
args = Writer()
args.write_long(prefetch_size).\
| python | {
"resource": ""
} |
q7826 | BasicClass.consume | train | def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None):
'''
Start a queue consumer. If `cb` is supplied, will be called when
broker confirms that consumer is registered.
'''
nowait = nowait and self.allow_nowait() and not cb
if nowait and consumer_tag == '':
consumer_tag = self._generate_consumer_tag()
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(consumer_tag).\
| python | {
"resource": ""
} |
q7827 | BasicClass._lookup_consumer_tag_by_consumer | train | def _lookup_consumer_tag_by_consumer(self, consumer):
'''Look up consumer tag given its consumer function
NOTE: this protected method may be called by derived classes
:param callable consumer: consumer function
:returns: matching consumer tag or None
| python | {
"resource": ""
} |
q7828 | BasicClass._purge_consumer_by_tag | train | def _purge_consumer_by_tag(self, consumer_tag):
'''Purge consumer entry from this basic instance
NOTE: this protected method may be called by derived classes
:param str consumer_tag:
| python | {
"resource": ""
} |
q7829 | BasicClass.publish | train | def publish(self, msg, exchange, routing_key, mandatory=False,
immediate=False, ticket=None):
'''
publish a message.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_bits(mandatory, immediate)
| python | {
"resource": ""
} |
q7830 | BasicClass.return_msg | train | def return_msg(self, reply_code, reply_text, exchange, routing_key):
'''
Return a failed message. Not named "return" because python interpreter
can't deal with that.
'''
args = Writer()
args.write_short(reply_code).\
| python | {
"resource": ""
} |
q7831 | BasicClass.get | train | def get(self, queue, consumer=None, no_ack=True, ticket=None):
'''
Ask to fetch a single message from a queue. If a consumer is supplied,
the consumer will be called with either a Message argument, or None if
there is no message in queue. If a synchronous transport, | python | {
"resource": ""
} |
q7832 | BasicClass._recv_get_response | train | def _recv_get_response(self, method_frame):
'''
Handle either get_ok or get_empty. This is a hack because the
synchronous callback stack is expecting one method to satisfy the
expectation. To keep that loop as tight as possible, work within
those constraints. Use of get is not recommended anyway.
''' | python | {
"resource": ""
} |
q7833 | BasicClass.ack | train | def ack(self, delivery_tag, multiple=False):
'''
Acknowledge delivery of a message. If multiple=True, acknowledge up-to
| python | {
"resource": ""
} |
q7834 | BasicClass.reject | train | def reject(self, delivery_tag, requeue=False):
'''
Reject a message.
'''
args = Writer()
args.write_longlong(delivery_tag).\
| python | {
"resource": ""
} |
q7835 | BasicClass.recover_async | train | def recover_async(self, requeue=False):
'''
Redeliver all unacknowledged messages on this channel.
This method is deprecated in | python | {
"resource": ""
} |
q7836 | BasicClass.recover | train | def recover(self, requeue=False, cb=None):
'''
Ask server to redeliver all unacknowledged messages.
'''
args = Writer()
args.write_bit(requeue)
# The XML spec is incorrect; this method is always synchronous
# http://lists.rabbitmq.com/pipermail/rabbitmq-discuss/2011-January/010738.html | python | {
"resource": ""
} |
q7837 | BasicClass._read_msg | train | def _read_msg(self, method_frame, with_consumer_tag=False,
with_message_count=False):
'''
Support method to read a Message from the current frame buffer.
Will return a Message, or re-queue current frames and raise a
FrameUnderflow. Takes an optional argument on whether to read the
consumer tag so it can be used for both deliver and get-ok.
'''
header_frame, body = self._reap_msg_frames(method_frame)
if with_consumer_tag:
consumer_tag = method_frame.args.read_shortstr()
delivery_tag = method_frame.args.read_longlong()
redelivered = method_frame.args.read_bit()
exchange = method_frame.args.read_shortstr()
routing_key = method_frame.args.read_shortstr()
if with_message_count:
message_count = method_frame.args.read_long()
| python | {
"resource": ""
} |
q7838 | ChannelPool.publish | train | def publish(self, *args, **kwargs):
'''
Publish a message. Caller can supply an optional callback which will
be fired when the transaction is committed. Tries very hard to avoid
closed and inactive channels, but a ChannelError or ConnectionError
may still be raised.
'''
user_cb = kwargs.pop('cb', None)
# If the first channel we grab is inactive, continue fetching until
# we get an active channel, then put the inactive channels back in
# the pool. Try to keep the overhead to a minimum.
channel = self._get_channel()
if channel and not channel.active:
inactive_channels = set()
while channel and not channel.active:
inactive_channels.add(channel)
channel = self._get_channel()
self._free_channels.update(inactive_channels)
# When the transaction is committed, add the channel back to the pool
# and call any user-defined callbacks. If there is anything in queue,
# pop it and call back to publish(). Only do so if the channel is
# still active though, because | python | {
"resource": ""
} |
q7839 | ChannelPool._process_queue | train | def _process_queue(self):
'''
If there are any message in the queue, process one of them. | python | {
"resource": ""
} |
q7840 | ChannelPool._get_channel | train | def _get_channel(self):
'''
Fetch a channel from the pool. Will return a new one if necessary. If
a channel in the free pool is closed, will remove it. Will return None
if we hit the cap. Will clean up any channels that were published to
but closed due to error.
'''
while len(self._free_channels):
rval = self._free_channels.pop()
if not rval.closed:
return rval
# don't adjust _channels value because the callback will do that
| python | {
"resource": ""
} |
q7841 | ContentFrame.create_frames | train | def create_frames(self, channel_id, buf, frame_max):
'''
A generator which will create frames from a buffer given a max
frame size.
'''
size = frame_max - 8 # 8 bytes overhead for frame header and footer
offset = 0
while True:
| python | {
"resource": ""
} |
q7842 | EventTransport.connect | train | def connect(self, (host, port)):
'''
Connect assuming a host and port tuple. Implemented as non-blocking,
and will close the transport if there's an error
'''
self._host = "%s:%s" % (host, port)
self._sock = EventSocket(
read_cb=self._sock_read_cb,
close_cb=self._sock_close_cb,
error_cb=self._sock_error_cb,
debug=self.connection.debug,
logger=self.connection.logger)
if self.connection._sock_opts:
for k, v in self.connection._sock_opts.iteritems():
| python | {
"resource": ""
} |
q7843 | EventTransport.read | train | def read(self, timeout=None):
'''
Read from the transport. If no data is available, should return None.
The timeout is ignored as this returns only data that has already
been buffered locally.
'''
# NOTE: copying over this comment from Connection, because there is
# knowledge captured here, even if the details are stale
# Because of the timer callback to dataRead when we re-buffered,
# there's a chance that in between we've lost the socket. If that's
# the case, just silently return as some code elsewhere would have
| python | {
"resource": ""
} |
q7844 | HeaderFrame.parse | train | def parse(self, channel_id, payload):
'''
Parse a header frame for a channel given a Reader payload.
'''
class_id = payload.read_short()
weight = payload.read_short()
size = payload.read_longlong()
properties = {}
# The AMQP spec is overly-complex when it comes to handling header
# frames. The spec says that in addition to the first 16bit field,
# additional ones can follow which /may/ then be in the property list
# (because bit flags aren't in the list). Properly implementing custom
# values requires the ability change the properties and their types,
# which someone is welcome to do, but seriously, what's the point?
# Because the complexity of parsing and writing this frame directly
# impacts the speed at which messages can be processed, there are two
# branches for both a fast parse which assumes no changes to the
# properties and a slow parse. For now it's up to someone using custom
# headers to flip the flag.
if self.DEFAULT_PROPERTIES:
flag_bits = payload.read_short()
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if flag_bits & mask:
| python | {
"resource": ""
} |
q7845 | Frame.read_frames | train | def read_frames(cls, reader):
'''
Read one or more frames from an IO stream. Buffer must support file
object interface.
After reading, caller will need to check if there are bytes remaining
in the stream. If there are, then that implies that there is one or
more incomplete frames and more data needs to be read. The position
of the cursor in the frame stream will mark the point at which the
last good frame was read. If the caller is expecting a sequence of
frames and only received a part of that sequence, they are responsible
for buffering those frames until the rest of the frames in the sequence
have arrived.
'''
rval = deque()
while True:
frame_start_pos = reader.tell()
try:
frame = Frame._read_frame(reader)
except Reader.BufferUnderflow:
| python | {
"resource": ""
} |
q7846 | Frame._read_frame | train | def _read_frame(cls, reader):
'''
Read a single frame from a Reader. Will return None if there is an
incomplete frame in the stream.
Raise MissingFooter if there's a problem reading the footer byte.
'''
frame_type = reader.read_octet()
channel_id = reader.read_short()
size = reader.read_long()
payload = Reader(reader, reader.tell(), size)
# Seek to end of payload
reader.seek(size, 1)
ch = reader.read_octet() # footer
if ch != 0xce:
raise Frame.FormatError(
| python | {
"resource": ""
} |
q7847 | RabbitExchangeClass.unbind | train | def unbind(self, exchange, source, routing_key='', nowait=True,
arguments={}, ticket=None, cb=None):
'''
Unbind an exchange from another.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
| python | {
"resource": ""
} |
q7848 | RabbitBasicClass.publish | train | def publish(self, *args, **kwargs):
'''
Publish a message. Will return the id of the message if publisher
confirmations are enabled, else will return 0.
'''
if self.channel.confirm._enabled:
| python | {
"resource": ""
} |
q7849 | RabbitBasicClass._recv_ack | train | def _recv_ack(self, method_frame):
'''Receive an ack from the broker.'''
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
| python | {
"resource": ""
} |
q7850 | RabbitBasicClass.nack | train | def nack(self, delivery_tag, multiple=False, requeue=False):
'''Send a nack to the broker.'''
args = Writer()
args.write_longlong(delivery_tag).\
| python | {
"resource": ""
} |
q7851 | RabbitBasicClass._recv_nack | train | def _recv_nack(self, method_frame):
'''Receive a nack from the broker.'''
if self._nack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple, requeue = method_frame.args.read_bits(2)
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
| python | {
"resource": ""
} |
q7852 | RabbitBasicClass._recv_cancel | train | def _recv_cancel(self, method_frame):
'''Handle Basic.Cancel from broker
:param MethodFrame method_frame: Basic.Cancel method frame from broker
'''
self.logger.warning("consumer cancelled by broker: %r", method_frame)
consumer_tag = method_frame.args.read_shortstr()
# NOTE: per RabbitMQ spec, no-wait is always true in Basic.Cancel from
# broker
# Remove consumer from this basic instance
try:
cancel_cb = self._broker_cancel_cb_map.pop(consumer_tag)
except KeyError:
# Must be a race condition between user's cancel and broker's cancel
| python | {
"resource": ""
} |
q7853 | RabbitConfirmClass.select | train | def select(self, nowait=True, cb=None):
'''
Set this channel to use publisher confirmations.
'''
nowait = nowait and self.allow_nowait() and not cb
if not self._enabled:
self._enabled = True
| python | {
"resource": ""
} |
q7854 | Channel.close | train | def close(self, reply_code=0, reply_text='', class_id=0, method_id=0):
'''
Close this channel. Routes to channel.close.
'''
# In the off chance that we call this twice. A good example is if
# there's an error in close listeners and so we're still inside a
# single call to | python | {
"resource": ""
} |
q7855 | Channel.publish_synchronous | train | def publish_synchronous(self, *args, **kwargs):
'''
Helper for publishing a message using transactions. If 'cb' keyword
arg | python | {
"resource": ""
} |
q7856 | Channel.dispatch | train | def dispatch(self, method_frame):
'''
Dispatch a method.
'''
klass = self._class_map.get(method_frame.class_id)
if klass:
klass.dispatch(method_frame)
else:
| python | {
"resource": ""
} |
q7857 | Channel.process_frames | train | def process_frames(self):
'''
Process the input buffer.
'''
while len(self._frame_buffer):
# It would make sense to call next_frame, but it's
# technically faster to repeat the code here.
frame = self._frame_buffer.popleft()
if self._emergency_close_pending:
# Implement stability rule from AMQP 0.9.1 section 1.5.2.5.
# Method channel.close: "After sending this method, any
# received methods except Close and Close-OK MUST be discarded."
#
# NOTE: presently, we limit our implementation of the rule to
# the "emergency close" scenario to avoid potential adverse
# side-effect during normal user-initiated close
if (not isinstance(frame, MethodFrame) or
frame.class_id != self.channel.CLASS_ID or
frame.method_id not in (self.channel.CLOSE_METHOD_ID,
self.channel.CLOSE_OK_METHOD_ID)):
self.logger.warn("Emergency channel close: dropping input "
"frame %.255s", frame)
continue
try:
self.dispatch(frame)
except ProtocolClass.FrameUnderflow:
return
except (ConnectionClosed, ChannelClosed):
# Immediately raise if connection or channel is closed
raise
except Exception:
self.logger.exception(
"Closing on failed dispatch of frame %.255s", frame)
# | python | {
"resource": ""
} |
q7858 | Channel.send_frame | train | def send_frame(self, frame):
'''
Queue a frame for sending. Will send immediately if there are no
pending synchronous transactions on this connection.
'''
if self.closed:
if self.close_info and len(self.close_info['reply_text']) > 0:
raise ChannelClosed(
"channel %d is closed: %s : %s",
self.channel_id,
self.close_info['reply_code'],
self.close_info['reply_text'])
raise ChannelClosed()
# If there's any pending event at all, then it means that when the
# current dispatch loop started, all possible frames were flushed
# and the remaining item(s) starts with a sync callback. | python | {
"resource": ""
} |
q7859 | Channel.add_synchronous_cb | train | def add_synchronous_cb(self, cb):
'''
Add an expectation of a callback to release a synchronous transaction.
'''
if self.connection.synchronous or self._synchronous:
wrapper = SyncWrapper(cb)
self._pending_events.append(wrapper)
while wrapper._read:
# Don't check that the channel has been closed until after
# reading frames, in the case that this is processing a clean
# channel closed. If there's a protocol error during
# read_frames, this will loop back around and result in a
# channel closed exception.
if self.closed:
if self.close_info and \
len(self.close_info['reply_text']) > 0:
| python | {
"resource": ""
} |
q7860 | Channel.clear_synchronous_cb | train | def clear_synchronous_cb(self, cb):
'''
If the callback is the current expected callback, will clear it off the
stack. Else will raise in exception if there's an expectation but this
doesn't satisfy it.
'''
if len(self._pending_events):
ev = self._pending_events[0]
# We can't have a strict check using this simple mechanism,
# because we could be waiting for a synch response while messages
# are being published. So for now, if it's not in the list, do a
# check to see if the callback is in the pending list, and if so,
# then raise, because it means we received stuff | python | {
"resource": ""
} |
q7861 | Channel._flush_pending_events | train | def _flush_pending_events(self):
'''
Send pending frames that are in the event queue.
'''
while len(self._pending_events) and \
| python | {
"resource": ""
} |
q7862 | Channel._closed_cb | train | def _closed_cb(self, final_frame=None):
'''
"Private" callback from the ChannelClass when a channel is closed. Only
called after broker initiated close, or we receive a close_ok. Caller
has the option to send a final frame, to be used to bypass any
synchronous or otherwise-pending frames so that the channel can be
cleanly closed.
'''
# delete all pending data and send final frame if thre is one. note
# that it bypasses send_frame so that even if the closed state is set,
# the frame is published.
if final_frame:
self._connection.send_frame(final_frame)
try:
| python | {
"resource": ""
} |
q7863 | GeventTransport.connect | train | def connect(self, (host, port)):
'''
Connect using a host,port tuple
'''
| python | {
"resource": ""
} |
q7864 | GeventTransport.read | train | def read(self, timeout=None):
'''
Read from the transport. If no data is available, should return None.
If timeout>0, will only block for `timeout` seconds.
'''
# If currently locked, another greenlet is trying to read, so yield
# control and then return none. Required if a Connection is configured
# to be synchronous, a sync callback is trying to read, and there's
# another read loop running read_frames. Without it, the run loop will
# release the lock but then immediately acquire it again. Yielding
# control in the reading thread after bytes are read won't fix
# anything, because it's quite possible the bytes read resulted in a
# frame that satisfied the synchronous callback, and so this needs to
| python | {
"resource": ""
} |
q7865 | ProtocolClass.dispatch | train | def dispatch(self, method_frame):
'''
Dispatch a method for this protocol.
'''
method = self.dispatch_map.get(method_frame.method_id)
if method:
callback = self.channel.clear_synchronous_cb(method)
| python | {
"resource": ""
} |
q7866 | Reader.seek | train | def seek(self, offset, whence=0):
'''
Simple seek. Follows standard interface.
'''
if whence == 0:
self._pos = self._start_pos + offset
elif whence == 1:
| python | {
"resource": ""
} |
q7867 | Reader._check_underflow | train | def _check_underflow(self, n):
'''
Raise BufferUnderflow if there's not enough bytes to | python | {
"resource": ""
} |
q7868 | Reader.buffer | train | def buffer(self):
'''
Get a copy of the buffer that this is reading from. Returns a
buffer object
| python | {
"resource": ""
} |
q7869 | Reader.read_bit | train | def read_bit(self):
"""
        Read a single boolean value, returns 0 or 1. Convenience for single
bit fields.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
"""
# Perform a faster check on underflow
| python | {
"resource": ""
} |
q7870 | Reader.read_bits | train | def read_bits(self, num):
'''
Read several bits packed into the same field. Will return as a list.
The bit field itself is little-endian, though the order of the
returned array looks big-endian for ease of decomposition.
Reader('\x02').read_bits(2) -> [False,True]
Reader('\x08').read_bits(2) ->
[False,True,False,False,False,False,False,False]
first_field, second_field = Reader('\x02').read_bits(2)
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise ValueError if num < 0 or num > 9
'''
# Perform a faster check | python | {
"resource": ""
} |
q7871 | Reader._read_field | train | def _read_field(self):
'''
Read a single byte for field type, then read the value.
'''
ftype = self._input[self._pos]
| python | {
"resource": ""
} |
q7872 | ChannelClass.open | train | def open(self):
'''
Open the channel for communication.
'''
args = Writer()
| python | {
"resource": ""
} |
q7873 | ChannelClass._send_flow | train | def _send_flow(self, active):
'''
Send a flow control command.
'''
args = Writer()
| python | {
"resource": ""
} |
q7874 | ChannelClass._recv_flow | train | def _recv_flow(self, method_frame):
'''
Receive a flow control command from the broker
'''
self.channel._active = method_frame.args.read_bit()
args = Writer()
| python | {
"resource": ""
} |
q7875 | ChannelClass._recv_flow_ok | train | def _recv_flow_ok(self, method_frame):
'''
Receive a flow control ack from the broker.
'''
self.channel._active | python | {
"resource": ""
} |
q7876 | ChannelClass.close | train | def close(self, reply_code=0, reply_text='', class_id=0, method_id=0):
'''
Close this channel. Caller has the option of specifying the reason for
closure and the class and method ids of the current frame in which an
error occurred. If in the event of an exception, the channel will be
marked as immediately closed. If channel is already closed, call is
ignored.
'''
if not getattr(self, 'channel', None) or self.channel._closed:
return
self.channel._close_info = {
'reply_code': reply_code,
'reply_text': reply_text,
'class_id': class_id,
'method_id': method_id
}
# exceptions here likely due to race condition as connection is closing
# cap the reply_text we send because it may be arbitrarily long
try:
args = Writer()
args.write_short(reply_code)
args.write_shortstr(reply_text[:255])
args.write_short(class_id)
| python | {
"resource": ""
} |
q7877 | ChannelClass._recv_close | train | def _recv_close(self, method_frame):
'''
Receive a close command from the broker.
'''
self.channel._close_info = {
'reply_code': method_frame.args.read_short(),
'reply_text': method_frame.args.read_shortstr(),
'class_id': method_frame.args.read_short(),
| python | {
"resource": ""
} |
q7878 | ChannelClass._recv_close_ok | train | def _recv_close_ok(self, method_frame):
'''
Receive a close ack from the broker.
'''
| python | {
"resource": ""
} |
q7879 | QueueClass.bind | train | def bind(self, queue, exchange, routing_key='', nowait=True, arguments={},
ticket=None, cb=None):
'''
bind to a queue.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
| python | {
"resource": ""
} |
q7880 | QueueClass.unbind | train | def unbind(self, queue, exchange, routing_key='', arguments={},
ticket=None, cb=None):
'''
Unbind a queue from an exchange. This is always synchronous.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(exchange).\
| python | {
"resource": ""
} |
q7881 | QueueClass.purge | train | def purge(self, queue, nowait=True, ticket=None, cb=None):
'''
Purge all messages in a queue.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_bit(nowait)
| python | {
"resource": ""
} |
q7882 | Writer.write_bits | train | def write_bits(self, *args):
'''
Write multiple bits in a single byte field. The bits will be written in
        little-endian order, but should be supplied in big-endian order. Will
raise ValueError when more than 8 arguments are supplied.
| python | {
"resource": ""
} |
q7883 | Writer.write_bit | train | def write_bit(self, b, pack=Struct('B').pack):
'''
Write a single bit. Convenience method for single bit args.
'''
| python | {
"resource": ""
} |
q7884 | Writer.write_short_at | train | def write_short_at(self, n, pos, pack_into=Struct('>H').pack_into):
'''
Write an unsigned 16bit value at a specific position in the buffer.
Used for writing tables and frames.
'''
if 0 <= n <= 0xFFFF: | python | {
"resource": ""
} |
q7885 | Writer.write_long | train | def write_long(self, n, pack=Struct('>I').pack):
"""
Write an integer as an unsigned 32-bit value.
"""
| python | {
"resource": ""
} |
q7886 | Writer.write_long_at | train | def write_long_at(self, n, pos, pack_into=Struct('>I').pack_into):
'''
Write an unsigned 32bit value at a specific position in the buffer.
Used for writing tables and frames.
'''
if 0 <= n <= 0xFFFFFFFF:
| python | {
"resource": ""
} |
q7887 | Writer.write_shortstr | train | def write_shortstr(self, s):
"""
Write a string up to 255 bytes long after encoding. If passed
a unicode string, encode as UTF-8.
"""
if isinstance(s, unicode):
| python | {
"resource": ""
} |
q7888 | Writer.write_timestamp | train | def write_timestamp(self, t, pack=Struct('>Q').pack):
"""
Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix UTC epoch.
"""
| python | {
"resource": ""
} |
q7889 | RTMPPacket.body | train | def body(self):
"""The body of the packet."""
view = | python | {
"resource": ""
} |
q7890 | add_log_callback | train | def add_log_callback(callback):
"""Adds a log callback."""
global _log_callbacks
| python | {
"resource": ""
} |
q7891 | RTMPStream.read | train | def read(self, size):
"""Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error.
"""
# If enabled tell the server that our buffer can fit the whole
        # stream, this often increases throughput a lot.
if self._update_buffer and not self._updated_buffer and | python | {
"resource": ""
} |
q7892 | RTMPStream.write | train | def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
| python | {
"resource": ""
} |
q7893 | RTMPStream.pause | train | def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
| python | {
"resource": ""
} |
q7894 | RTMPStream.unpause | train | def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
| python | {
"resource": ""
} |
q7895 | RTMPStream.seek | train | def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
| python | {
"resource": ""
} |
q7896 | RTMP.set_option | train | def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to | python | {
"resource": ""
} |
q7897 | RTMP.setup_url | train | def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes | python | {
"resource": ""
} |
q7898 | RTMP.read_packet | train | def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
| python | {
"resource": ""
} |
q7899 | RTMP.send_packet | train | def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.