text
stringlengths
81
112k
def get(self, queue, no_ack=False):
    """Receive a message from a declared queue by name.

    :returns: A :class:`Message` object if a message was received,
        ``None`` otherwise. If ``None`` was returned, it probably means
        there were no messages waiting on the queue.
    """
    frame = self.channel.basic_get(queue, no_ack=no_ack)
    if frame:
        return self.message_to_python(frame)
    return None
def consume(self, limit=None):
    """Return an iterator that waits for one message at a time.

    :keyword limit: Maximum number of iterations before stopping.
        ``None`` (default) means iterate forever.
    """
    for total_message_count in count():
        # Bug fix: ``raise StopIteration`` inside a generator is a
        # RuntimeError under PEP 479 (Python 3.7+); ``return`` is the
        # equivalent, portable way to end the generator.
        if limit and total_message_count >= limit:
            return
        if not self.channel.is_open:
            return
        self.channel.wait()
        yield True
Cancel a channel by consumer tag. def cancel(self, consumer_tag): """Cancel a channel by consumer tag.""" if not self.channel.connection: return self.channel.basic_cancel(consumer_tag)
def close(self):
    """Close the channel if it is currently open."""
    channel = self._channel
    if channel and channel.is_open:
        channel.close()
    # Drop the (weak) reference so a new channel is created on demand.
    self._channel_ref = None
def qos(self, prefetch_size, prefetch_count, apply_global=False):
    """Request a specific Quality of Service from the broker."""
    self.channel.basic_qos(prefetch_size, prefetch_count, apply_global)
def channel(self):
    """If no channel currently exists, a new one is requested."""
    if not self._channel:
        # Hold the channel only weakly; presumably ``_channel``
        # dereferences ``_channel_ref`` — TODO confirm in the class body.
        self._channel_ref = weakref.ref(self.connection.get_channel())
    return self._channel
def establish_connection(self):
    """Establish a connection to the AMQP broker.

    :raises KeyError: If hostname, user id or password is missing.
    """
    conninfo = self.connection
    if not conninfo.hostname:
        raise KeyError("Missing hostname for AMQP connection.")
    if conninfo.userid is None:
        raise KeyError("Missing user id for AMQP connection.")
    if conninfo.password is None:
        raise KeyError("Missing password for AMQP connection.")
    if not conninfo.port:
        conninfo.port = self.default_port
    return amqp.Connection(host=conninfo.hostname,
                           port=conninfo.port,
                           userid=conninfo.userid,
                           password=conninfo.password,
                           virtual_host=conninfo.virtual_host)
def declare_consumer(self, queue, no_ack, callback, consumer_tag,
                     nowait=False):
    """Declare a consumer on the channel.

    :param queue: Name of the queue to consume from.
    :param no_ack: If true, the broker does not expect message
        acknowledgements.
    :param callback: Callable invoked for each delivered message.
    :param consumer_tag: Unique identifier for this consumer.
    :keyword nowait: If true, do not wait for the broker's reply.
    """
    # Bug fix: ``nowait`` was accepted but silently dropped; forward it
    # so fire-and-forget consumer declarations actually work.
    return self.channel.basic_consume(queue=queue,
                                      no_ack=no_ack,
                                      callback=callback,
                                      consumer_tag=consumer_tag,
                                      nowait=nowait)
def consume(self, limit=None):
    """Return an iterator that waits for one message at a time.

    :keyword limit: Maximum number of iterations before stopping.
        ``None`` (default) means iterate forever.
    """
    for total_message_count in count():
        # Bug fix: ``raise StopIteration`` inside a generator is a
        # RuntimeError under PEP 479 (Python 3.7+); ``return`` is the
        # equivalent, portable way to end the generator.
        if limit and total_message_count >= limit:
            return
        if not self.channel.is_open:
            return
        self.channel.conn.drain_events()
        yield True
def cancel(self, consumer_tag):
    """Cancel a channel by consumer tag."""
    # Nothing to do if the channel has no live connection.
    if self.channel.conn:
        self.channel.basic_cancel(consumer_tag)
def prepare_message(self, message_data, delivery_mode, priority=None,
                    content_type=None, content_encoding=None):
    """Encapsulate data into an AMQP message."""
    properties = {"delivery_mode": delivery_mode,
                  "priority": priority,
                  "content_type": content_type,
                  "content_encoding": content_encoding}
    return amqp.Message(message_data, properties=properties)
def raw_encode(data):
    """Special case serializer: pass the payload through untouched,
    encoding unicode text as UTF-8 and treating anything else as
    opaque binary data."""
    # NOTE: ``unicode`` is the Python 2 text type.
    if isinstance(data, unicode):
        return 'application/data', 'utf-8', data.encode('utf-8')
    return 'application/data', 'binary', data
def register_json():
    """Register an encoder/decoder for JSON serialization."""
    # anyjson is imported lazily so the dependency is only required
    # when JSON support is actually registered.
    from anyjson import serialize as json_serialize
    from anyjson import deserialize as json_deserialize
    registry.register('json', json_serialize, json_deserialize,
                      content_type='application/json',
                      content_encoding='utf-8')
def register_yaml():
    """Register an encoder/decoder for YAML serialization.

    It is slower than JSON, but allows for more data types to be
    serialized. Useful if you need to send data such as dates.
    """
    try:
        import yaml
        registry.register('yaml', yaml.safe_dump, yaml.safe_load,
                          content_type='application/x-yaml',
                          content_encoding='utf-8')
    except ImportError:

        def not_available(*args, **kwargs):
            """In case a client receives a yaml message, but yaml
            isn't installed."""
            raise SerializerNotInstalled(
                "No decoder installed for YAML. Install the PyYAML library")

        # Register only a (failing) decoder so incoming YAML messages
        # produce a helpful error rather than an obscure one.
        registry.register('yaml', None, not_available,
                          'application/x-yaml')
def register_pickle():
    """The fastest serialization method, but restricts you to Python
    clients."""
    # cPickle is the C-accelerated pickle module (Python 2 only).
    import cPickle
    registry.register('pickle', cPickle.dumps, cPickle.loads,
                      content_type='application/x-python-serialize',
                      content_encoding='binary')
def register_msgpack():
    """Register an encoder/decoder for msgpack serialization.

    See http://msgpack.sourceforge.net/
    """
    try:
        import msgpack
        registry.register('msgpack', msgpack.packs, msgpack.unpacks,
                          content_type='application/x-msgpack',
                          content_encoding='binary')
    except ImportError:

        def not_available(*args, **kwargs):
            """In case a client receives a msgpack message, but msgpack
            isn't installed."""
            # Doc fix: this previously claimed "yaml isn't installed",
            # a copy-paste error from register_yaml.
            raise SerializerNotInstalled(
                "No decoder installed for msgpack. "
                "Install the msgpack library")

        registry.register('msgpack', None, not_available,
                          'application/x-msgpack')
def register(self, name, encoder, decoder, content_type,
             content_encoding='utf-8'):
    """Register a new encoder/decoder.

    :param name: A convenience name for the serialization method.
    :param encoder: A method that will be passed a python data structure
        and should return a string representing the serialized data.
        If ``None``, then only a decoder will be registered. Encoding
        will not be possible.
    :param decoder: A method that will be passed a string representing
        serialized data and should return a python data structure.
        If ``None``, then only an encoder will be registered. Decoding
        will not be possible.
    :param content_type: The mime-type describing the serialized
        structure.
    :param content_encoding: The content encoding (character set) that
        the :param:`decoder` method will be returning. Will usually be
        ``utf-8``, ``us-ascii``, or ``binary``.
    """
    # Decoders are keyed by content type, encoders by convenience name.
    if decoder:
        self._decoders[content_type] = decoder
    if encoder:
        self._encoders[name] = (content_type, content_encoding, encoder)
def _set_default_serializer(self, name):
    """Set the default serialization method used by this library.

    :param name: The name of the registered serialization method.
        For example, ``json`` (default), ``pickle``, ``yaml``, or
        any custom methods registered using :meth:`register`.

    :raises SerializerNotInstalled: If the serialization method
        requested is not available.
    """
    try:
        content_type, content_encoding, encode = self._encoders[name]
    except KeyError:
        raise SerializerNotInstalled(
            "No encoder installed for %s" % name)
    self._default_content_type = content_type
    self._default_content_encoding = content_encoding
    self._default_encode = encode
def encode(self, data, serializer=None):
    """Serialize a data structure into a string suitable for sending
    as an AMQP message body.

    :param data: The message data to send. Can be a list, dictionary
        or a string.
    :keyword serializer: An optional string representing the
        serialization method you want the data marshalled into.
        (For example, ``json``, ``raw``, or ``pickle``.) If ``None``
        (default), then the default serializer will be used, unless
        ``data`` is a ``str`` or ``unicode`` object, in which case no
        serialization occurs. Note that if ``serializer`` is specified,
        that serialization method is used even for ``str``/``unicode``.

    :returns: A three-item tuple containing the content type
        (e.g. ``application/json``), content encoding (e.g. ``utf-8``)
        and a string containing the serialized data.

    :raises SerializerNotInstalled: If the serialization method
        requested is not available.
    """
    if serializer == "raw":
        return raw_encode(data)
    if serializer and not self._encoders.get(serializer):
        raise SerializerNotInstalled(
            "No encoder installed for %s" % serializer)

    # If a raw string was sent, assume binary encoding (it's likely
    # either ASCII or a raw binary file; 'binary' covers both).
    if not serializer and isinstance(data, str):
        return "application/data", "binary", data

    # Unicode text is forced into a UTF-8 byte string.
    if not serializer and isinstance(data, unicode):
        return "text/plain", "utf-8", data.encode("utf-8")

    if serializer:
        content_type, content_encoding, encoder = \
            self._encoders[serializer]
    else:
        encoder = self._default_encode
        content_type = self._default_content_type
        content_encoding = self._default_content_encoding
    return content_type, content_encoding, encoder(data)
def decode(self, data, content_type, content_encoding):
    """Deserialize a data stream as serialized using ``encode`` based
    on :param:`content_type`.

    :param data: The message data to deserialize.
    :param content_type: The content-type of the data
        (e.g. ``application/json``).
    :param content_encoding: The content-encoding of the data
        (e.g. ``utf-8``, ``binary``, or ``us-ascii``).

    :returns: The unserialized data.
    """
    content_type = content_type or 'application/data'
    content_encoding = (content_encoding or 'utf-8').lower()

    # Don't decode 8-bit strings or unicode objects.
    if content_encoding not in ('binary', 'ascii-8bit') and \
            not isinstance(data, unicode):
        data = codecs.decode(data, content_encoding)

    # Unknown content types pass through untouched.
    try:
        decoder = self._decoders[content_type]
    except KeyError:
        return data
    return decoder(data)
def ack(self):
    """Acknowledge this message as being processed.

    This will remove the message from the queue.

    :raises MessageStateError: If the message has already been
        acknowledged/requeued/rejected.
    """
    if self.acknowledged:
        raise self.MessageStateError(
            "Message already acknowledged with state: %s" % self._state)
    # This variant acknowledges via the raw frame rather than a
    # delivery tag.
    self.backend.ack(self._frame)
    self._state = "ACK"
def consume(self, limit=None):
    """Return an iterator that waits for one message at a time.

    :keyword limit: Maximum number of iterations before stopping.
        ``None`` (default) means iterate forever.
    """
    for total_message_count in count():
        # Bug fix: ``raise StopIteration`` inside a generator is a
        # RuntimeError under PEP 479 (Python 3.7+); ``return`` is the
        # equivalent, portable way to end the generator.
        if limit and total_message_count >= limit:
            return
        self.drain_events()
        yield True
def random(cls, length, bit_prob=.5):
    """Create a bit string of the given length, with the probability
    of each bit being set equal to bit_prob, which defaults to .5.

    Usage:
        # Create a random BitString of length 10 with mostly zeros.
        bits = BitString.random(10, bit_prob=.1)

    Arguments:
        length: An int, indicating the desired length of the result.
        bit_prob: A float in the range [0, 1]; the probability of any
            given bit in the result having a value of 1. Default is .5,
            giving 0 and 1 equal probabilities of appearance.
    Return:
        A randomly generated BitString instance of the requested
        length.
    """
    assert isinstance(length, int) and length >= 0
    assert isinstance(bit_prob, (int, float)) and 0 <= bit_prob <= 1

    probabilities = [1 - bit_prob, bit_prob]
    bits = numpy.random.choice([False, True], size=(length,),
                               p=probabilities)
    # Freeze the array so the resulting bit string is immutable.
    bits.flags.writeable = False
    return cls(bits)
def crossover_template(cls, length, points=2):
    """Create a crossover template with the given number of points.

    The crossover template can be used as a mask to crossover two
    bitstrings of the same length.

    Usage:
        assert len(parent1) == len(parent2)
        template = BitString.crossover_template(len(parent1))
        inv_template = ~template
        child1 = (parent1 & template) | (parent2 & inv_template)
        child2 = (parent1 & inv_template) | (parent2 & template)

    Arguments:
        length: An int, indicating the desired length of the result.
        points: An int, the number of crossover points.
    Return:
        A BitString instance of the requested length which can be used
        as a crossover template.
    """
    assert isinstance(length, int) and length >= 0
    assert isinstance(points, int) and points >= 0

    # Select the crossover points, then order them; the trailing
    # sentinel at ``length`` makes the loop emit the final segment.
    points = random.sample(range(length + 1), points)
    points.sort()
    points.append(length)

    # Fill the bits with alternating all-ones / all-zeros segments
    # delimited by the crossover points, starting at random.
    previous = 0
    include_range = bool(random.randrange(2))
    pieces = []
    for point in points:
        if point > previous:
            fill = (numpy.ones if include_range else numpy.zeros)
            pieces.append(fill(point - previous, dtype=bool))
        include_range = not include_range
        previous = point

    # Bug fix: numpy.concatenate() raises ValueError on an empty list,
    # which happened whenever length == 0; fall back to an empty array.
    if pieces:
        bits = numpy.concatenate(pieces)
    else:
        bits = numpy.zeros(0, dtype=bool)
    bits.flags.writeable = False
    return cls(bits)
def cover(cls, bits, wildcard_probability):
    """Create a new bit condition that matches the provided bit string,
    with the indicated per-index wildcard probability.

    Usage:
        condition = BitCondition.cover(bitstring, .33)
        assert condition(bitstring)

    Arguments:
        bits: A BitString which the resulting condition must match.
        wildcard_probability: A float in the range [0, 1] which
            indicates the likelihood of any given bit position
            containing a wildcard.
    Return:
        A randomly generated BitCondition which matches the given bits.
    """
    if not isinstance(bits, BitString):
        bits = BitString(bits)

    # A False mask bit marks a wildcard position.
    mask_values = [random.random() > wildcard_probability
                   for _ in range(len(bits))]
    return cls(bits, BitString(mask_values))
def crossover_with(self, other, points=2):
    """Perform n-point crossover on this bit condition and another of
    the same length, returning the two resulting children.

    Usage:
        offspring1, offspring2 = condition1.crossover_with(condition2)

    Arguments:
        other: A second BitCondition of the same length as this one.
        points: An int, the number of crossover points of the
            crossover operation.
    Return:
        A tuple (condition1, condition2) of BitConditions, where the
        value at each position of this BitCondition and the other is
        preserved in one or the other of the two resulting conditions.
    """
    assert isinstance(other, BitCondition)
    assert len(self) == len(other)

    template = BitString.crossover_template(len(self), points)
    inv_template = ~template

    # Swap bits and mask between the parents according to the template.
    child1_bits = (self._bits & template) | (other._bits & inv_template)
    child1_mask = (self._mask & template) | (other._mask & inv_template)
    child2_bits = (self._bits & inv_template) | (other._bits & template)
    child2_mask = (self._mask & inv_template) | (other._mask & template)

    # Convert the modified sequences back into BitConditions.
    return (type(self)(child1_bits, child1_mask),
            type(self)(child2_bits, child2_mask))
def get_backend_cls(self):
    """Get the currently used backend class."""
    backend_cls = self.backend_cls
    if not backend_cls or isinstance(backend_cls, basestring):
        # Resolve a name (or None) to an actual class via the
        # module-level ``get_backend_cls`` helper.
        backend_cls = get_backend_cls(backend_cls)
    return backend_cls
def ensure_connection(self, errback=None, max_retries=None,
                      interval_start=2, interval_step=2,
                      interval_max=30):
    """Ensure we have a connection to the server.

    If not, retry establishing the connection with the settings
    specified.

    :keyword errback: Optional callback called each time the
        connection can't be established. Arguments provided are the
        exception raised and the interval that will be slept
        ``(exc, interval)``.
    :keyword max_retries: Maximum number of times to retry. If this
        limit is exceeded the connection error will be re-raised.
    :keyword interval_start: The number of seconds we start sleeping
        for.
    :keyword interval_step: How many seconds added to the interval
        for each retry.
    :keyword interval_max: Maximum number of seconds to sleep between
        each retry.

    :returns: self, to allow chaining.
    """
    retry_over_time(self.connect, self.connection_errors, (), {},
                    errback, max_retries,
                    interval_start, interval_step, interval_max)
    return self
def close(self):
    """Close the currently open connection, ignoring socket errors."""
    try:
        connection = self._connection
        if connection:
            # Delegate the actual teardown to the backend.
            self.create_backend().close_connection(connection)
    except socket.error:
        # Best-effort close: a dead socket means we are done anyway.
        pass
    self._closed = True
def info(self):
    """Get connection info as a dictionary."""
    backend_cls = self.backend_cls or "amqplib"
    port = self.port or self.create_backend().default_port
    # "transport_cls" and "backend_cls" are kept in sync deliberately.
    return dict(hostname=self.hostname,
                userid=self.userid,
                password=self.password,
                virtual_host=self.virtual_host,
                port=port,
                insist=self.insist,
                ssl=self.ssl,
                transport_cls=backend_cls,
                backend_cls=backend_cls,
                connect_timeout=self.connect_timeout)
def decode(self):
    """Deserialize the message body, returning the original python
    structure sent by the publisher."""
    return serialization.decode(self.body, self.content_type,
                                self.content_encoding)
def payload(self):
    """The decoded message, lazily computed and cached."""
    if not self._decoded_cache:
        self._decoded_cache = self.decode()
    return self._decoded_cache
def ack(self):
    """Acknowledge this message as being processed.

    This will remove the message from the queue.

    :raises MessageStateError: If the message has already been
        acknowledged/requeued/rejected.
    """
    if self.acknowledged:
        raise self.MessageStateError(
            "Message already acknowledged with state: %s" % self._state)
    self.backend.ack(self.delivery_tag)
    self._state = "ACK"
def reject(self):
    """Reject this message.

    The message will be discarded by the server.

    :raises MessageStateError: If the message has already been
        acknowledged/requeued/rejected.
    """
    if self.acknowledged:
        raise self.MessageStateError(
            "Message already acknowledged with state: %s" % self._state)
    self.backend.reject(self.delivery_tag)
    self._state = "REJECTED"
def requeue(self):
    """Reject this message and put it back on the queue.

    You must not use this method as a means of selecting messages
    to process.

    :raises MessageStateError: If the message has already been
        acknowledged/requeued/rejected.
    """
    if self.acknowledged:
        raise self.MessageStateError(
            "Message already acknowledged with state: %s" % self._state)
    self.backend.requeue(self.delivery_tag)
    self._state = "REQUEUED"
def gen_unique_id():
    """Generate a unique id, having - hopefully - a very small chance
    of collision.

    For now this is provided by :func:`uuid.uuid4`.
    """
    # Workaround for http://bugs.python.org/issue4607: use the libuuid
    # C function directly when it is available.
    if ctypes and _uuid_generate_random:
        buf = ctypes.create_string_buffer(16)
        _uuid_generate_random(buf)
        return str(UUID(bytes=buf.raw))
    return str(uuid4())
def retry_over_time(fun, catch, args=None, kwargs=None, errback=None,
                    max_retries=None, interval_start=2, interval_step=2,
                    interval_max=30):
    """Retry the function over and over until max retries is exceeded.

    For each retry we sleep for a while before we try again; this
    interval is increased for every retry until the max seconds is
    reached.

    :param fun: The function to try.
    :param catch: Exceptions to catch, can be either tuple or a single
        exception class.
    :keyword args: Positional arguments passed on to the function.
    :keyword kwargs: Keyword arguments passed on to the function.
    :keyword errback: Callback for when an exception in ``catch`` is
        raised. The callback must take two arguments: ``exc`` and
        ``interval``, where ``exc`` is the exception instance, and
        ``interval`` is the time in seconds to sleep next.
    :keyword max_retries: Maximum number of retries before we give up.
        If this is not set, we will retry forever.
    :keyword interval_start: How long (in seconds) we start sleeping
        between retries.
    :keyword interval_step: By how much the interval is increased for
        each retry.
    :keyword interval_max: Maximum number of seconds to sleep between
        retries.
    """
    # Bug fix: the defaults were the mutable objects [] and {}, which
    # are shared between calls; use None sentinels instead.
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs

    interval_range = range(interval_start,
                           interval_max + interval_start,
                           interval_step)
    for retries, interval in enumerate(repeatlast(interval_range)):
        try:
            # Return the result as soon as a call succeeds.
            return fun(*args, **kwargs)
        except catch as exc:
            # ``except E, exc`` (Python 2-only syntax) replaced with
            # the portable ``except E as exc`` form.
            if max_retries and retries > max_retries:
                raise
            if errback:
                errback(exc, interval)
            sleep(interval)
def get(self, *args, **kwargs):
    """Get the next waiting message from the queue.

    :returns: A :class:`Message` instance, or ``None`` if there are no
        messages waiting.
    """
    if not mqueue.qsize():
        return None
    message_data, content_type, content_encoding = mqueue.get()
    return self.Message(backend=self,
                        body=message_data,
                        content_type=content_type,
                        content_encoding=content_encoding)
def queue_purge(self, queue, **kwargs):
    """Discard all messages in the queue and return how many were
    discarded."""
    discarded = mqueue.qsize()
    # Clear the underlying deque of the shared in-memory queue.
    mqueue.queue.clear()
    return discarded
def prepare_message(self, message_data, delivery_mode, content_type,
                    content_encoding, **kwargs):
    """Prepare message for sending.

    The in-memory backend represents a message as a plain
    ``(body, content_type, content_encoding)`` tuple; ``delivery_mode``
    is accepted for interface compatibility but has no effect here.
    """
    return message_data, content_type, content_encoding
def get_future_expectation(self, match_set):
    """Return a numerical value representing the expected future payoff
    of the previously selected action, given only the current match
    set.

    Usage:
        match_set = model.match(situation)
        expectation = model.algorithm.get_future_expectation(match_set)
        payoff = previous_reward + discount_factor * expectation
        previous_match_set.payoff = payoff

    Arguments:
        match_set: A MatchSet instance.
    Return:
        A float, the estimate of the expected near-future payoff for
        the situation for which match_set was generated, based on the
        contents of match_set.
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.algorithm is self

    # Blend the best and the overall prediction according to the
    # idealization factor, then discount for time.
    blended = (
        self.idealization_factor * match_set.best_prediction +
        (1 - self.idealization_factor) * match_set.prediction
    )
    return self.discount_factor * blended
def covering_is_required(self, match_set):
    """Return a Boolean indicating whether covering is required for the
    current match set.

    Usage:
        match_set = model.match(situation)
        if model.algorithm.covering_is_required(match_set):
            new_rule = model.algorithm.cover(match_set)
            assert new_rule.condition(situation)
            model.add(new_rule)
            match_set = model.match(situation)

    Arguments:
        match_set: A MatchSet instance (before covering is applied).
    Return:
        A bool indicating whether match_set contains too few matching
        classifier rules and therefore needs to be augmented.
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.algorithm is self

    # When minimum_actions is unset, require one matching rule per
    # possible action; otherwise use the configured threshold.
    if self.minimum_actions is None:
        threshold = len(match_set.model.possible_actions)
    else:
        threshold = self.minimum_actions
    return len(match_set) < threshold
def cover(self, match_set):
    """Return a new classifier rule that can be added to the match set,
    with a condition that matches the situation of the match set and an
    action selected to avoid duplication of the actions already
    contained therein.

    Usage:
        match_set = model.match(situation)
        if model.algorithm.covering_is_required(match_set):
            new_rule = model.algorithm.cover(match_set)
            assert new_rule.condition(situation)
            model.add(new_rule)
            match_set = model.match(situation)

    Arguments:
        match_set: A MatchSet instance.
    Return:
        A new ClassifierRule instance, appropriate for addition to
        match_set and to the classifier set from which match_set was
        drawn.
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.model.algorithm is self

    # Create a new condition that matches the situation.
    condition = bitstrings.BitCondition.cover(
        match_set.situation,
        self.wildcard_probability
    )

    # Pick a random action that (preferably) isn't already suggested by
    # some other rule for this situation.
    unused_actions = (
        frozenset(match_set.model.possible_actions) -
        frozenset(match_set)
    )
    candidates = unused_actions or match_set.model.possible_actions
    action = random.choice(list(candidates))

    # Create the new rule.
    return XCSClassifierRule(
        condition,
        action,
        self,
        match_set.time_stamp
    )
def distribute_payoff(self, match_set):
    """Share the payoff earned by the selected action among the
    rules of the corresponding action set.

    Each participating rule's experience, average reward, error
    estimate, and action set size estimate are updated, followed by
    a fitness update and, when configured, action set subsumption.

    Arguments:
        match_set: A MatchSet instance for which the accumulated
            payoff needs to be distributed among its classifier
            rules.
    Return:
        None
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.algorithm is self
    assert match_set.selected_action is not None

    payoff = float(match_set.payoff)
    action_set = match_set[match_set.selected_action]
    current_set_size = sum(rule.numerosity for rule in action_set)

    for rule in action_set:
        rule.experience += 1

        # Step size: the larger of the configured learning rate and
        # the reciprocal of the rule's experience.
        step = max(self.learning_rate, 1 / rule.experience)

        # Note: the error update deliberately uses the *new* average
        # reward computed on the previous line.
        rule.average_reward += (payoff - rule.average_reward) * step
        rule.error += (abs(payoff - rule.average_reward) - rule.error) * step
        rule.action_set_size += (
            (current_set_size - rule.action_set_size) * step
        )

    # Recompute fitness from the rules' relative accuracies.
    self._update_fitness(action_set)

    # Optionally let the most general accurate rule absorb the rest.
    if self.do_action_set_subsumption:
        self._action_set_subsumption(action_set)
def update(self, match_set):
    """Evolve the classifier set the match set was drawn from by
    running the genetic algorithm on the selected action set.

    The GA only runs when the rules in the action set have, on
    average, gone more than ``ga_threshold`` iterations without an
    update. Two parents are chosen with fitness-proportionate
    probability, crossed over, and mutated; each child is then either
    subsumed by an accurate parent, merged with an identical existing
    rule, or added to the population as a new rule.

    Arguments:
        match_set: A MatchSet instance for which the classifier set
            from which it was drawn needs to be updated based on the
            match set's payoff distribution.
    Return:
        None
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.model.algorithm is self
    assert match_set.selected_action is not None

    # Advance the population's iteration counter.
    match_set.model.update_time_stamp()

    action_set = match_set[match_set.selected_action]

    # Run the GA only if enough time has passed, on average, since
    # the rules in this action set were last touched by it.
    elapsed = (
        match_set.model.time_stamp -
        self._get_average_time_stamp(action_set)
    )
    if elapsed <= self.ga_threshold:
        return

    # Record that the GA visited these rules on this iteration.
    self._set_timestamps(action_set)

    # Fitness-proportionate selection of two parents.
    first_parent = self._select_parent(action_set)
    second_parent = self._select_parent(action_set)

    # With the configured probability, apply crossover; otherwise the
    # children start as exact copies of the parents' conditions.
    if random.random() < self.crossover_probability:
        child_conditions = first_parent.condition.crossover_with(
            second_parent.condition
        )
    else:
        child_conditions = (
            first_parent.condition,
            second_parent.condition,
        )

    # Mutate each child's condition while keeping it a match for the
    # current situation.
    child_conditions = [
        self._mutate(cond, action_set.situation)
        for cond in child_conditions
    ]

    new_children = []
    for condition in child_conditions:
        # GA subsumption: an experienced, accurate parent whose
        # condition generalizes the child absorbs it instead of a
        # new rule being created.
        if self.do_ga_subsumption:
            subsumed = False
            for parent in first_parent, second_parent:
                can_subsume = (
                    (parent.experience > self.subsumption_threshold) and
                    parent.error < self.error_threshold and
                    parent.condition(condition)
                )
                if can_subsume:
                    if parent in action_set.model:
                        parent.numerosity += 1
                        self.prune(action_set.model)
                    else:
                        # The parent may have been deleted by an
                        # earlier subsumption; restore it.
                        parent.numerosity = 1
                        action_set.model.add(parent)
                    subsumed = True
                    break
            if subsumed:
                continue

        # A child identical to an existing rule just bumps that
        # rule's numerosity; otherwise queue it for insertion below.
        child = XCSClassifierRule(
            condition,
            action_set.action,
            self,
            action_set.model.time_stamp
        )
        if child in action_set.model:
            action_set.model.add(child)
        else:
            new_children.append(child)

    # Initialize and insert the genuinely new children.
    if new_children:
        average_reward = .5 * (
            first_parent.average_reward +
            second_parent.average_reward
        )
        error = .5 * (first_parent.error + second_parent.error)

        # .1 * (average fitness of parents)
        fitness = .05 * (
            first_parent.fitness +
            second_parent.fitness
        )

        for child in new_children:
            child.average_reward = average_reward
            child.error = error
            child.fitness = fitness
            action_set.model.add(child)
def prune(self, model):
    """Trim the population back toward ``max_population_size`` by
    stochastically deleting one low-quality rule.

    A rule's deletion probability is proportional to its action set
    size estimate times its numerosity, inflated for experienced
    rules whose per-copy fitness falls well below the population
    average.

    Arguments:
        model: A ClassifierSet instance whose population may need to
            be reduced in size.
    Return:
        A possibly empty list of the ClassifierRule instances whose
        numerosities dropped to 0 as a result of this call.
    """
    assert isinstance(model, ClassifierSet)
    assert model.algorithm is self

    # The virtual population size counts each rule once per unit of
    # numerosity.
    total_numerosity = sum(rule.numerosity for rule in model)
    if total_numerosity <= self.max_population_size:
        return []  # Population is small enough; nothing to prune.

    average_fitness = (
        sum(rule.fitness for rule in model) / total_numerosity
    )

    # Compute each rule's deletion vote from its niche size and,
    # when applicable, its relative lack of fitness.
    vote_total = 0
    votes = {}
    for rule in model:
        weight = rule.action_set_size * rule.numerosity

        experienced = rule.experience > self.deletion_threshold
        unfit = (
            rule.fitness / rule.numerosity <
            self.fitness_threshold * average_fitness
        )
        if experienced and unfit:
            weight *= average_fitness / (rule.fitness / rule.numerosity)

        votes[rule] = weight
        vote_total += weight

    # Roulette-wheel selection of the rule to delete.
    spin = random.uniform(0, vote_total)
    for rule, weight in votes.items():
        spin -= weight
        if spin <= 0:
            assert rule in model
            if model.discard(rule):
                return [rule]
            return []

    assert False  # The wheel must land on some rule.
Update the fitness values of the rules belonging to this action set. def _update_fitness(self, action_set): """Update the fitness values of the rules belonging to this action set.""" # Compute the accuracy of each rule. Accuracy is inversely # proportional to error. Below a certain error threshold, accuracy # becomes constant. Accuracy values range over (0, 1]. total_accuracy = 0 accuracies = {} for rule in action_set: if rule.error < self.error_threshold: accuracy = 1 else: accuracy = ( self.accuracy_coefficient * (rule.error / self.error_threshold) ** -self.accuracy_power ) accuracies[rule] = accuracy total_accuracy += accuracy * rule.numerosity # On rare occasions we have zero total accuracy. This avoids a div # by zero total_accuracy = total_accuracy or 1 # Use the relative accuracies of the rules to update their fitness for rule in action_set: accuracy = accuracies[rule] rule.fitness += ( self.learning_rate * (accuracy * rule.numerosity / total_accuracy - rule.fitness) )
Perform action set subsumption. def _action_set_subsumption(self, action_set): """Perform action set subsumption.""" # Select a condition with maximum bit count among those having # sufficient experience and sufficiently low error. selected_rule = None selected_bit_count = None for rule in action_set: if not (rule.experience > self.subsumption_threshold and rule.error < self.error_threshold): continue bit_count = rule.condition.count() if (selected_rule is None or bit_count > selected_bit_count or (bit_count == selected_bit_count and random.randrange(2))): selected_rule = rule selected_bit_count = bit_count # If no rule was found satisfying the requirements, return # early. if selected_rule is None: return # Subsume each rule which the selected rule generalizes. When a # rule is subsumed, all instances of the subsumed rule are replaced # with instances of the more general one in the population. to_remove = [] for rule in action_set: if (selected_rule is not rule and selected_rule.condition(rule.condition)): selected_rule.numerosity += rule.numerosity action_set.model.discard(rule, rule.numerosity) to_remove.append(rule) for rule in to_remove: action_set.remove(rule)
Return the average time stamp for the rules in this action set. def _get_average_time_stamp(action_set): """Return the average time stamp for the rules in this action set.""" # This is the average value of the iteration counter upon the most # recent update of each rule in this action set. total_time_stamps = sum(rule.time_stamp * rule.numerosity for rule in action_set) total_numerosity = sum(rule.numerosity for rule in action_set) return total_time_stamps / (total_numerosity or 1)
Select a rule from this action set, with probability proportionate to its fitness, to act as a parent for a new rule in the classifier set. Return the selected rule. def _select_parent(action_set): """Select a rule from this action set, with probability proportionate to its fitness, to act as a parent for a new rule in the classifier set. Return the selected rule.""" total_fitness = sum(rule.fitness for rule in action_set) selector = random.uniform(0, total_fitness) for rule in action_set: selector -= rule.fitness if selector <= 0: return rule # If for some reason a case slips through the above loop, perhaps # due to floating point error, we fall back on uniform selection. return random.choice(list(action_set))
def _mutate(self, condition, situation):
    """Return a mutated copy of ``condition`` that still matches
    ``situation``.

    Each position's wildcard status flips independently with
    probability ``mutation_probability``; positions that end up
    non-wildcarded take their value from the situation, which keeps
    the child matching it.
    """
    # Random bit string marking the positions whose wildcard status
    # flips; XOR applies the flips to the condition's mask. A new
    # mask is built because the original's is immutable.
    flips = bitstrings.BitString.random(
        len(condition.mask),
        self.mutation_probability
    )
    new_mask = condition.mask ^ flips

    # Value bits come straight from the situation, so the mutated
    # condition is guaranteed to still match it.
    if isinstance(situation, bitstrings.BitCondition):
        new_mask &= situation.mask
        return bitstrings.BitCondition(situation.bits, new_mask)
    return bitstrings.BitCondition(situation, new_mask)
def establish_connection(self):
    """Establish a connection to the AMQP broker via pika."""
    conninfo = self.connection
    if not conninfo.port:
        conninfo.port = self.default_port
    credentials = pika.PlainCredentials(conninfo.userid,
                                        conninfo.password)
    params = pika.ConnectionParameters(
        conninfo.hostname,
        port=conninfo.port,
        virtual_host=conninfo.virtual_host,
        credentials=credentials)
    return self._connection_cls(params)
def queue_purge(self, queue, **kwargs):
    """Delete every message waiting in ``queue``, leaving it empty.

    Return the number of messages that were purged.
    """
    reply = self.channel.queue_purge(queue=queue)
    return reply.message_count
def queue_declare(self, queue, durable, exclusive, auto_delete,
                  warn_if_exists=False, arguments=None):
    """Declare a queue by name.

    ``warn_if_exists`` is accepted for interface compatibility but
    is not used by this backend.
    """
    return self.channel.queue_declare(queue=queue,
                                      durable=durable,
                                      exclusive=exclusive,
                                      auto_delete=auto_delete,
                                      arguments=arguments)
def declare_consumer(self, queue, no_ack, callback, consumer_tag,
                     nowait=False):
    """Register ``callback`` as a consumer of ``queue``.

    The backend's raw ``(channel, method, header, body)`` callback
    arguments are packed into a single tuple before being handed to
    ``callback``. ``nowait`` is accepted for interface compatibility
    but is not used by this backend.
    """
    @functools.wraps(callback)
    def _bundle_raw_message(channel, method, header, body):
        # Repackage the four positional callback arguments as the
        # single raw-message tuple the rest of the stack expects.
        return callback((channel, method, header, body))

    return self.channel.basic_consume(_bundle_raw_message,
                                      queue=queue,
                                      no_ack=no_ack,
                                      consumer_tag=consumer_tag)
def close(self):
    """Close the channel if it is still open, then drop the cached
    channel reference."""
    # handler.channel_close is set once the channel has been closed,
    # so a falsy value means the channel is still open.
    if self._channel and not self._channel.handler.channel_close:
        self._channel.close()
    self._channel_ref = None
def prepare_message(self, message_data, delivery_mode, priority=None,
                    content_type=None, content_encoding=None):
    """Pair the message body with a pika ``BasicProperties`` instance
    describing its delivery options, forming an AMQP message."""
    properties = pika.BasicProperties(
        priority=priority,
        content_type=content_type,
        content_encoding=content_encoding,
        delivery_mode=delivery_mode)
    return message_data, properties
def publish(self, message, exchange, routing_key, mandatory=None,
            immediate=None, headers=None):
    """Publish a prepared message to a named exchange.

    :param message: A ``(body, properties)`` pair as returned by
        :meth:`prepare_message`.
    :param exchange: Name of the exchange to publish to.
    :param routing_key: The message routing key.
    :keyword mandatory: Request mandatory routing.
    :keyword immediate: Request immediate delivery.
    :keyword headers: Optional message headers, attached to the
        properties before publishing.
    """
    body, properties = message
    if headers:
        properties.headers = headers
    # The fix here: basic_publish's return value was previously
    # bound to an unused local; it is intentionally ignored.
    self.channel.basic_publish(body=body,
                               properties=properties,
                               exchange=exchange,
                               routing_key=routing_key,
                               mandatory=mandatory,
                               immediate=immediate)
    if mandatory or immediate:
        # NOTE(review): closing after a mandatory/immediate publish
        # looks like a workaround for the underlying client; confirm
        # before removing.
        self.close()
Generate a unique consumer tag. :rtype string: def _generate_consumer_tag(self): """Generate a unique consumer tag. :rtype string: """ return "%s.%s%s" % ( self.__class__.__module__, self.__class__.__name__, self._next_consumer_tag())
def declare(self):
    """Declare the queue and the exchange, and bind the queue to
    the exchange.

    For the ``headers`` exchange type the routing key is used as the
    binding arguments and an empty routing key is sent instead.
    Return ``self`` so calls can be chained.
    """
    arguments = None
    routing_key = self.routing_key
    if self.exchange_type == "headers":
        # Header exchanges match on arguments, not routing keys.
        arguments, routing_key = routing_key, ""

    if self.queue:
        self.backend.queue_declare(queue=self.queue,
                                   durable=self.durable,
                                   exclusive=self.exclusive,
                                   auto_delete=self.auto_delete,
                                   arguments=self.queue_arguments,
                                   warn_if_exists=self.warn_if_exists)
    if self.exchange:
        self.backend.exchange_declare(exchange=self.exchange,
                                      type=self.exchange_type,
                                      durable=self.durable,
                                      auto_delete=self.auto_delete)
    if self.queue:
        self.backend.queue_bind(queue=self.queue,
                                exchange=self.exchange,
                                routing_key=routing_key,
                                arguments=arguments)
    self._closed = False
    return self
Internal method used when a message is received in consume mode. def _receive_callback(self, raw_message): """Internal method used when a message is received in consume mode.""" message = self.backend.message_to_python(raw_message) if self.auto_ack and not message.acknowledged: message.ack() self.receive(message.payload, message)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
    """Receive the next message waiting on the queue.

    :returns: A :class:`carrot.backends.base.BaseMessage` instance,
        or ``None`` if there's no messages to be received.

    :keyword enable_callbacks: Enable callbacks. The message will be
        processed with all registered callbacks. Default is disabled.
    :keyword auto_ack: Override the default :attr:`auto_ack` setting.
    :keyword no_ack: Override the default :attr:`no_ack` setting.
    """
    # Use "is None" rather than "or" so an explicit False argument
    # can override a True instance default (the old "x or default"
    # form silently discarded an explicit False).
    if no_ack is None:
        no_ack = self.no_ack
    if auto_ack is None:
        auto_ack = self.auto_ack
    message = self.backend.get(self.queue, no_ack=no_ack)
    if message:
        if auto_ack and not message.acknowledged:
            message.ack()
        if enable_callbacks:
            self.receive(message.payload, message)
    return message
def receive(self, message_data, message):
    """Dispatch a received message to every registered callback.

    Called when a message arrives via :meth:`wait`,
    :meth:`process_next` or :meth:`iterqueue`. Callbacks are
    registered with :meth:`register_callback`.

    :param message_data: The deserialized message data.
    :param message: The :class:`carrot.backends.base.BaseMessage`
        instance.
    :raises NotImplementedError: If no callbacks has been registered.
    """
    if not self.callbacks:
        raise NotImplementedError("No consumer callbacks registered")
    for handler in self.callbacks:
        handler(message_data, message)
def discard_all(self, filterfunc=None):
    """Discard all waiting messages, optionally filtered.

    :param filterfunc: When given, only messages for which this
        function returns true are discarded (acknowledged); every
        message is still consumed from the queue.
    :returns: the number of messages discarded.

    *WARNING*: All incoming messages will be ignored and not
    processed.
    """
    if not filterfunc:
        # Fast path: let the broker purge the queue wholesale.
        return self.backend.queue_purge(self.queue)

    # Filtering relies on explicit acknowledgement, which auto/no-ack
    # modes make impossible.
    if self.no_ack or self.auto_ack:
        raise Exception("discard_all: Can't use filter with auto/no-ack.")

    discarded = 0
    while True:
        message = self.fetch()
        if message is None:
            return discarded
        if filterfunc(message):
            message.ack()
            discarded += 1
def consume(self, no_ack=None):
    """Declare this consumer with the backend so messages start
    flowing to :meth:`_receive_callback`.

    :keyword no_ack: Override the default :attr:`no_ack` setting.
    """
    # Use "is None" rather than "or" so an explicit False argument
    # can override a True instance default (the old "x or default"
    # form silently discarded an explicit False).
    if no_ack is None:
        no_ack = self.no_ack
    self.backend.declare_consumer(queue=self.queue, no_ack=no_ack,
                                  callback=self._receive_callback,
                                  consumer_tag=self.consumer_tag,
                                  nowait=True)
    self.channel_open = True
def wait(self, limit=None):
    """Go into consume mode, processing messages indefinitely.

    Mostly for testing purposes and simple programs; you probably
    want :meth:`iterconsume` or :meth:`iterqueue` instead.

    Each incoming message is processed through :meth:`receive`,
    which applies it to all registered callbacks.
    """
    it = self.iterconsume(limit)
    while True:
        # next() works on both Python 2.6+ and Python 3, unlike the
        # Python 2-only it.next() method used previously.
        next(it)
def iterqueue(self, limit=None, infinite=False):
    """Iterator yielding pending messages through synchronous direct
    queue access (``basic_get``).

    Use :meth:`iterconsume` instead when performance matters more
    than synchronous behaviour.

    :keyword limit: If set, the iterator stops when it has processed
        this number of messages in total.

    :keyword infinite: Don't stop when there are no messages
        waiting, but yield ``None`` instead. If infinite you
        obviously shouldn't consume the whole iterator at once
        without using a ``limit``.
    """
    for fetched_count in count():
        item = self.fetch()
        exhausted = not infinite and item is None
        limit_reached = limit and fetched_count >= limit
        if exhausted or limit_reached:
            # Return instead of raising StopIteration: PEP 479 turns
            # a StopIteration raised inside a generator into a
            # RuntimeError on Python 3.7+.
            return
        yield item
def cancel(self):
    """Cancel a running :meth:`iterconsume` session, ignoring
    consumer tags the backend no longer knows about."""
    if not self.channel_open:
        return
    try:
        self.backend.cancel(self.consumer_tag)
    except KeyError:
        # The consumer tag was already gone; nothing to cancel.
        pass
def close(self):
    """Cancel any active consumer, close the backend channel, and
    mark this consumer as closed."""
    self.cancel()
    self.backend.close()
    self._closed = True
def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False):
    """Request a specific Quality of Service from the broker.

    The QoS can be specified for the current channel or for all
    channels on the connection. The particular properties and
    semantics always depend on the content class semantics; though
    the qos method could in principle apply to both peers, it is
    currently meaningful only for the server.

    :param prefetch_size: Prefetch window in octets. The server will
        send a message in advance if it is equal to or smaller in
        size than the available prefetch size (and also falls into
        other prefetch limits). May be set to zero, meaning "no
        specific limit", although other prefetch limits may still
        apply. Ignored when the :attr:`no_ack` option is set.

    :param prefetch_count: Prefetch window in whole messages. May be
        used in combination with ``prefetch_size``; a message will
        only be sent in advance if both prefetch windows (and those
        at the channel and connection level) allow it. Ignored when
        the :attr:`no_ack` option is set.

    :keyword apply_global: By default the QoS settings apply to the
        current channel only. If this is set, they are applied to
        the entire connection.
    """
    return self.backend.qos(prefetch_size, prefetch_count,
                            apply_global)
def declare(self):
    """Create this publisher's exchange on the broker."""
    options = {"exchange": self.exchange,
               "type": self.exchange_type,
               "durable": self.durable,
               "auto_delete": self.auto_delete}
    self.backend.exchange_declare(**options)
def create_message(self, message_data, delivery_mode=None, priority=None,
                   content_type=None, content_encoding=None,
                   serializer=None):
    """Serialize ``message_data`` (unless a content type indicates it
    is already serialized) and wrap it in an AMQP message with the
    proper headers set."""
    delivery_mode = delivery_mode or self.delivery_mode

    if not content_type:
        # No content type given, so serialize the data internally.
        serializer = serializer or self.serializer
        (content_type, content_encoding,
         message_data) = serialization.encode(message_data,
                                              serializer=serializer)
    else:
        # The caller opted out of serialization; still make sure the
        # content encoding is recorded.
        if isinstance(message_data, unicode):
            if not content_encoding:
                content_encoding = 'utf-8'
            message_data = message_data.encode(content_encoding)
        elif not content_encoding:
            # A plain byte string could be anything; assume binary.
            content_encoding = 'binary'

    return self.backend.prepare_message(message_data, delivery_mode,
                                        priority=priority,
                                        content_type=content_type,
                                        content_encoding=content_encoding)
def send(self, message_data, routing_key=None, delivery_mode=None,
         mandatory=False, immediate=False, priority=0, content_type=None,
         content_encoding=None, serializer=None, exchange=None):
    """Create a message from ``message_data`` and publish it.

    :param message_data: The message data to send. Can be a list,
        dictionary or a string.

    :keyword routing_key: A custom routing key for the message;
        falls back to the :attr:`routing_key` attribute. For
        ``headers`` exchanges the routing key is sent as the message
        headers instead.

    :keyword mandatory: If set, an exception is raised when the
        message can't be routed to a queue, instead of it being
        silently dropped by the server.

    :keyword immediate: Request immediate delivery; an exception is
        raised if the message cannot be routed to a consumer
        immediately, instead of the server accepting and queueing it
        with no guarantee it is ever consumed.

    :keyword delivery_mode: Override the default
        :attr:`delivery_mode`.

    :keyword priority: The message priority, ``0`` to ``9``.

    :keyword content_type: The message's content type. When set, no
        serialization occurs; the data is assumed to be binary or
        already serialized. Leave blank when using the built-in
        serialization.

    :keyword content_encoding: The character set the message is
        encoded in. Use "binary" for raw binary objects. Leave blank
        when using the built-in serialization.

    :keyword serializer: Override the default :attr:`serializer`.

    :keyword exchange: Override the exchange to publish to. Note
        that this exchange must have been declared.
    """
    headers = None
    routing_key = routing_key or self.routing_key

    if self.exchange_type == "headers":
        # Header exchanges route on headers, not routing keys.
        headers, routing_key = routing_key, ""

    exchange = exchange or self.exchange

    message = self.create_message(message_data,
                                  priority=priority,
                                  delivery_mode=delivery_mode,
                                  content_type=content_type,
                                  content_encoding=content_encoding,
                                  serializer=serializer)
    self.backend.publish(message,
                         exchange=exchange,
                         routing_key=routing_key,
                         mandatory=mandatory,
                         immediate=immediate,
                         headers=headers)
See :meth:`Publisher.send` def send(self, message_data, delivery_mode=None): """See :meth:`Publisher.send`""" self.publisher.send(message_data, delivery_mode=delivery_mode)
Close any open channels. def close(self): """Close any open channels.""" self.consumer.close() self.publisher.close() self._closed = True
Internal method used when a message is received in consume mode. def _receive_callback(self, raw_message): """Internal method used when a message is received in consume mode.""" message = self.backend.message_to_python(raw_message) if self.auto_ack and not message.acknowledged: message.ack() try: decoded = message.decode() except Exception, exc: if not self.on_decode_error: raise self.on_decode_error(message, exc) else: self.receive(decoded, message)
Add another consumer from dictionary configuration. def add_consumer_from_dict(self, queue, **options): """Add another consumer from dictionary configuration.""" options.setdefault("routing_key", options.pop("binding_key", None)) consumer = Consumer(self.connection, queue=queue, backend=self.backend, **options) self.consumers.append(consumer) return consumer
Add another consumer from a :class:`Consumer` instance. def add_consumer(self, consumer): """Add another consumer from a :class:`Consumer` instance.""" consumer.backend = self.backend self.consumers.append(consumer)
Declare consumer so messages can be received from it using :meth:`iterconsume`. def _declare_consumer(self, consumer, nowait=False): """Declare consumer so messages can be received from it using :meth:`iterconsume`.""" if consumer.queue not in self._open_consumers: # Use the ConsumerSet's consumer by default, but if the # child consumer has a callback, honor it. callback = consumer.callbacks and \ consumer._receive_callback or self._receive_callback self.backend.declare_consumer(queue=consumer.queue, no_ack=consumer.no_ack, nowait=nowait, callback=callback, consumer_tag=consumer.consumer_tag) self._open_consumers[consumer.queue] = consumer.consumer_tag
Declare consumers. def consume(self): """Declare consumers.""" head = self.consumers[:-1] tail = self.consumers[-1] [self._declare_consumer(consumer, nowait=True) for consumer in head] self._declare_consumer(tail, nowait=False)
Cycle between all consumers in consume mode. See :meth:`Consumer.iterconsume`. def iterconsume(self, limit=None): """Cycle between all consumers in consume mode. See :meth:`Consumer.iterconsume`. """ self.consume() return self.backend.consume(limit=limit)
Cancel a running :meth:`iterconsume` session. def cancel(self): """Cancel a running :meth:`iterconsume` session.""" for consumer_tag in self._open_consumers.values(): try: self.backend.cancel(consumer_tag) except KeyError: pass self._open_consumers.clear()
def convert_md_to_rst(source, destination=None, backup_dir=None):
    """Try to convert the source, an .md (markdown) file, to an .rst
    (reStructuredText) file at the destination.

    :param source: Path of the markdown file to convert.
    :param destination: Path of the .rst output. Defaults to the source
        path with its extension replaced by ``.rst``. An existing file
        at this path is overwritten, but a timestamped backup is kept.
    :param backup_dir: Directory for backups of an existing destination
        file. Defaults to a ``bak`` subdirectory next to the
        destination.

    In the event of an error, the destination file will be left
    untouched: any pre-existing file is restored from its backup.
    """
    # Doing this in the function instead of the module level ensures
    # the error occurs when the function is called, rather than when
    # the module is evaluated.
    try:
        import pypandoc
    except ImportError:
        # Don't give up right away; first try to install the module.
        # Use *this* interpreter's pip (a bare "pip" on PATH may belong
        # to a different Python), and fail loudly if the install fails.
        import subprocess
        import sys
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", "pypandoc"])
        import pypandoc

    # Set our destination path to a default, if necessary.
    destination = destination or (os.path.splitext(source)[0] + '.rst')

    # Likewise for the backup directory.
    backup_dir = backup_dir or os.path.join(
        os.path.dirname(destination), 'bak')
    bak_name = (os.path.basename(destination) +
                time.strftime('.%Y%m%d%H%M%S.bak'))
    bak_path = os.path.join(backup_dir, bak_name)

    # If there's already a file at the destination path, move it out of
    # the way, but don't delete it.
    if os.path.isfile(destination):
        # makedirs (vs mkdir) also creates any missing parents.
        if not os.path.isdir(os.path.dirname(bak_path)):
            os.makedirs(os.path.dirname(bak_path))
        os.rename(destination, bak_path)

    try:
        # Try to convert the file.
        pypandoc.convert(
            source,
            'rst',
            format='md',
            outputfile=destination
        )
    except BaseException:
        # If for any reason the conversion fails (including
        # KeyboardInterrupt), try to put things back like we found
        # them, then re-raise.
        if os.path.isfile(destination):
            os.remove(destination)
        if os.path.isfile(bak_path):
            os.rename(bak_path, destination)
        raise
Call the conversion routine on README.md to generate README.rst. Why do all this? Because pypi requires reStructuredText, but markdown is friendlier to work with and is nicer for GitHub. def build_readme(base_path=None): """Call the conversion routine on README.md to generate README.rst. Why do all this? Because pypi requires reStructuredText, but markdown is friendlier to work with and is nicer for GitHub.""" if base_path: path = os.path.join(base_path, 'README.md') else: path = 'README.md' convert_md_to_rst(path) print("Successfully converted README.md to README.rst")
Return a situation, encoded as a bit string, which represents the observable state of the environment. Usage: situation = scenario.sense() assert isinstance(situation, BitString) Arguments: None Return: The current situation. def sense(self): """Return a situation, encoded as a bit string, which represents the observable state of the environment. Usage: situation = scenario.sense() assert isinstance(situation, BitString) Arguments: None Return: The current situation. """ self.current_situation = bitstrings.BitString([ random.randrange(2) for _ in range(self.address_size + (1 << self.address_size)) ]) return self.current_situation
Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered. def execute(self, action): """Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered. """ assert action in self.possible_actions self.remaining_cycles -= 1 index = int(bitstrings.BitString( self.current_situation[:self.address_size] )) bit = self.current_situation[self.address_size + index] return action == bit
Reset the scenario, starting it over for a new run. Usage: if not scenario.more(): scenario.reset() Arguments: None Return: None def reset(self): """Reset the scenario, starting it over for a new run. Usage: if not scenario.more(): scenario.reset() Arguments: None Return: None """ self.remaining_cycles = self.initial_training_cycles self.needle_index = random.randrange(self.input_size)
Return a situation, encoded as a bit string, which represents the observable state of the environment. Usage: situation = scenario.sense() assert isinstance(situation, BitString) Arguments: None Return: The current situation. def sense(self): """Return a situation, encoded as a bit string, which represents the observable state of the environment. Usage: situation = scenario.sense() assert isinstance(situation, BitString) Arguments: None Return: The current situation. """ haystack = bitstrings.BitString.random(self.input_size) self.needle_value = haystack[self.needle_index] return haystack
Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered. def execute(self, action): """Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered. """ assert action in self.possible_actions self.remaining_cycles -= 1 return action == self.needle_value
Return a sequence containing the possible actions that can be executed within the environment. Usage: possible_actions = scenario.get_possible_actions() Arguments: None Return: A sequence containing the possible actions which can be executed within this scenario. def get_possible_actions(self): """Return a sequence containing the possible actions that can be executed within the environment. Usage: possible_actions = scenario.get_possible_actions() Arguments: None Return: A sequence containing the possible actions which can be executed within this scenario. """ possible_actions = self.wrapped.get_possible_actions() if len(possible_actions) <= 20: # Try to ensure that the possible actions are unique. Also, put # them into a list so we can iterate over them safely before # returning them; this avoids accidentally exhausting an # iterator, if the wrapped class happens to return one. try: possible_actions = list(set(possible_actions)) except TypeError: possible_actions = list(possible_actions) try: possible_actions.sort() except TypeError: pass self.logger.info('Possible actions:') for action in possible_actions: self.logger.info(' %s', action) else: self.logger.info("%d possible actions.", len(possible_actions)) return possible_actions
Return a situation, encoded as a bit string, which represents the observable state of the environment. Usage: situation = scenario.sense() assert isinstance(situation, BitString) Arguments: None Return: The current situation. def sense(self): """Return a situation, encoded as a bit string, which represents the observable state of the environment. Usage: situation = scenario.sense() assert isinstance(situation, BitString) Arguments: None Return: The current situation. """ situation = self.wrapped.sense() self.logger.debug('Situation: %s', situation) return situation
Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered. def execute(self, action): """Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered. """ self.logger.debug('Executing action: %s', action) reward = self.wrapped.execute(action) if reward: self.total_reward += reward self.steps += 1 self.logger.debug('Reward received on this step: %.5f', reward or 0) self.logger.debug('Average reward per step: %.5f', self.total_reward / self.steps) return reward
Return a Boolean indicating whether additional actions may be executed, per the reward program. Usage: while scenario.more(): situation = scenario.sense() selected_action = choice(possible_actions) reward = scenario.execute(selected_action) Arguments: None Return: A bool indicating whether additional situations remain in the current run. def more(self): """Return a Boolean indicating whether additional actions may be executed, per the reward program. Usage: while scenario.more(): situation = scenario.sense() selected_action = choice(possible_actions) reward = scenario.execute(selected_action) Arguments: None Return: A bool indicating whether additional situations remain in the current run. """ more = self.wrapped.more() if not self.steps % 100: self.logger.info('Steps completed: %d', self.steps) self.logger.info('Average reward per step: %.5f', self.total_reward / (self.steps or 1)) if not more: self.logger.info('Run completed.') self.logger.info('Total steps: %d', self.steps) self.logger.info('Total reward received: %.5f', self.total_reward) self.logger.info('Average reward per step: %.5f', self.total_reward / (self.steps or 1)) return more
Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered. def execute(self, action): """Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered. """ reward = self.reward_function( action, self.classifications[self.steps] ) self.total_reward += reward self.steps += 1 return reward
Return the classifications made by the algorithm for this scenario. Usage: model.run(scenario, learn=False) classifications = scenario.get_classifications() Arguments: None Return: An indexable sequence containing the classifications made by the model for each situation, in the same order as the original situations themselves appear. def get_classifications(self): """Return the classifications made by the algorithm for this scenario. Usage: model.run(scenario, learn=False) classifications = scenario.get_classifications() Arguments: None Return: An indexable sequence containing the classifications made by the model for each situation, in the same order as the original situations themselves appear. """ if bitstrings.using_numpy(): return numpy.array(self.classifications) else: return self.classifications
Create and return a new classifier set initialized for handling the given scenario. Usage: scenario = MUXProblem() model = algorithm.new_model(scenario) model.run(scenario, learn=True) Arguments: scenario: A Scenario instance. Return: A new, untrained classifier set, suited for the given scenario. def new_model(self, scenario): """Create and return a new classifier set initialized for handling the given scenario. Usage: scenario = MUXProblem() model = algorithm.new_model(scenario) model.run(scenario, learn=True) Arguments: scenario: A Scenario instance. Return: A new, untrained classifier set, suited for the given scenario. """ assert isinstance(scenario, scenarios.Scenario) return ClassifierSet(self, scenario.get_possible_actions())
Run the algorithm, utilizing a classifier set to choose the most appropriate action for each situation produced by the scenario. Improve the situation/action mapping on each reward cycle to maximize reward. Return the classifier set that was created. Usage: scenario = MUXProblem() model = algorithm.run(scenario) Arguments: scenario: A Scenario instance. Return: A new classifier set, trained on the given scenario. def run(self, scenario): """Run the algorithm, utilizing a classifier set to choose the most appropriate action for each situation produced by the scenario. Improve the situation/action mapping on each reward cycle to maximize reward. Return the classifier set that was created. Usage: scenario = MUXProblem() model = algorithm.run(scenario) Arguments: scenario: A Scenario instance. Return: A new classifier set, trained on the given scenario. """ assert isinstance(scenario, scenarios.Scenario) model = self.new_model(scenario) model.run(scenario, learn=True) return model
Compute the combined prediction and prediction weight for this action set. The combined prediction is the weighted average of the individual predictions of the classifiers. The combined prediction weight is the sum of the individual prediction weights of the classifiers. Usage: Do not call this method directly. Use the prediction and/or prediction_weight properties instead. Arguments: None Return: None def _compute_prediction(self): """Compute the combined prediction and prediction weight for this action set. The combined prediction is the weighted average of the individual predictions of the classifiers. The combined prediction weight is the sum of the individual prediction weights of the classifiers. Usage: Do not call this method directly. Use the prediction and/or prediction_weight properties instead. Arguments: None Return: None """ total_weight = 0 total_prediction = 0 for rule in self._rules.values(): total_weight += rule.prediction_weight total_prediction += (rule.prediction * rule.prediction_weight) self._prediction = total_prediction / (total_weight or 1) self._prediction_weight = total_weight
The highest value from among the predictions made by the action sets in this match set. def best_prediction(self): """The highest value from among the predictions made by the action sets in this match set.""" if self._best_prediction is None and self._action_sets: self._best_prediction = max( action_set.prediction for action_set in self._action_sets.values() ) return self._best_prediction
A tuple containing the actions whose action sets have the best prediction. def best_actions(self): """A tuple containing the actions whose action sets have the best prediction.""" if self._best_actions is None: best_prediction = self.best_prediction self._best_actions = tuple( action for action, action_set in self._action_sets.items() if action_set.prediction == best_prediction ) return self._best_actions