def square_in_front(self, location=None):
"""
Finds square directly in front of Pawn
    :type location: Location
:rtype: Location
"""
location = location or self.location
return location.shift_up() if self.color == color.white else location.shift_down() |
def forward_moves(self, position):
"""
Finds possible moves one step and two steps in front
of Pawn.
    :type position: Board
    :rtype: gen
"""
if position.is_square_empty(self.square_in_front(self.location)):
"""
If square in front is empty add the move
"""
if self.would_move_be_promotion():
for move in self.create_promotion_moves(notation_const.PROMOTE):
yield move
else:
yield self.create_move(end_loc=self.square_in_front(self.location),
status=notation_const.MOVEMENT)
if self.on_home_row() and \
position.is_square_empty(self.two_squares_in_front(self.location)):
"""
If pawn is on home row and two squares in front of the pawn is empty
add the move
"""
yield self.create_move(
end_loc=self.square_in_front(self.square_in_front(self.location)),
status=notation_const.MOVEMENT
) |
def _one_diagonal_capture_square(self, capture_square, position):
"""
    Yields the specified diagonal square as a capture move if an enemy piece occupies it
"""
if self.contains_opposite_color_piece(capture_square, position):
if self.would_move_be_promotion():
for move in self.create_promotion_moves(status=notation_const.CAPTURE_AND_PROMOTE,
location=capture_square):
yield move
else:
yield self.create_move(end_loc=capture_square,
status=notation_const.CAPTURE) |
def capture_moves(self, position):
"""
Finds out all possible capture moves
    :rtype: gen
"""
try:
right_diagonal = self.square_in_front(self.location.shift_right())
for move in self._one_diagonal_capture_square(right_diagonal, position):
yield move
except IndexError:
pass
try:
left_diagonal = self.square_in_front(self.location.shift_left())
for move in self._one_diagonal_capture_square(left_diagonal, position):
yield move
except IndexError:
pass |
def on_en_passant_valid_location(self):
"""
Finds out if pawn is on enemy center rank.
:rtype: bool
"""
return (self.color == color.white and self.location.rank == 4) or \
(self.color == color.black and self.location.rank == 3) |
def _is_en_passant_valid(self, opponent_pawn_location, position):
"""
    Finds out if an opponent's pawn is next to this pawn
:rtype: bool
"""
try:
pawn = position.piece_at_square(opponent_pawn_location)
        return pawn is not None and \
            isinstance(pawn, Pawn) and \
            pawn.color != self.color and \
            pawn.just_moved_two_steps
except IndexError:
return False |
def add_one_en_passant_move(self, direction, position):
"""
Yields en_passant moves in given direction if it is legal.
    :type direction: function
    :type position: Board
:rtype: gen
"""
try:
if self._is_en_passant_valid(direction(self.location), position):
yield self.create_move(
end_loc=self.square_in_front(direction(self.location)),
status=notation_const.EN_PASSANT
)
except IndexError:
pass |
def en_passant_moves(self, position):
"""
Finds possible en passant moves.
    :rtype: gen
    """
    # if the pawn is not on a valid en passant location, yield nothing
if self.on_en_passant_valid_location():
for move in itertools.chain(self.add_one_en_passant_move(lambda x: x.shift_right(), position),
self.add_one_en_passant_move(lambda x: x.shift_left(), position)):
yield move |
def possible_moves(self, position):
"""
    Finds out the locations of possible moves given a board.Board position.
    :pre location is on the board and this piece is at that location in position
    :type position: Board
    :rtype: gen
"""
for move in itertools.chain(self.forward_moves(position),
self.capture_moves(position),
self.en_passant_moves(position)):
yield move |
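A minimal, self-contained sketch of the generator-chaining pattern used by possible_moves above: each move source is a lazy generator and itertools.chain concatenates them without building intermediate lists. The move strings are illustrative stand-ins for the real Move objects.
import itertools

def forward_moves():
    yield "e3"
    yield "e4"

def capture_moves():
    yield "dxe5"

def en_passant_moves():
    return iter(())  # no en passant available in this toy position

for move in itertools.chain(forward_moves(), capture_moves(), en_passant_moves()):
    print(move)  # e3, e4, dxe5 |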
def main():
"""
Main method
"""
print("Creating a new game...")
new_game = Game(Human(color.white), Human(color.black))
result = new_game.play()
print("Result is ", result) |
def respond(text=None, ssml=None, attributes=None, reprompt_text=None,
reprompt_ssml=None, end_session=True):
""" Build a dict containing a valid response to an Alexa request.
If speech output is desired, either of `text` or `ssml` should
be specified.
:param text: Plain text speech output to be said by Alexa device.
:param ssml: Speech output in SSML form.
:param attributes: Dictionary of attributes to store in the session.
:param end_session: Should the session be terminated after this response?
:param reprompt_text, reprompt_ssml: Works the same as
`text`/`ssml`, but instead sets the reprompting speech output.
"""
obj = {
'version': '1.0',
'response': {
'outputSpeech': {'type': 'PlainText', 'text': ''},
'shouldEndSession': end_session
},
'sessionAttributes': attributes or {}
}
if text:
obj['response']['outputSpeech'] = {'type': 'PlainText', 'text': text}
elif ssml:
obj['response']['outputSpeech'] = {'type': 'SSML', 'ssml': ssml}
reprompt_output = None
if reprompt_text:
reprompt_output = {'type': 'PlainText', 'text': reprompt_text}
elif reprompt_ssml:
reprompt_output = {'type': 'SSML', 'ssml': reprompt_ssml}
if reprompt_output:
obj['response']['reprompt'] = {'outputSpeech': reprompt_output}
return obj |
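A hedged usage sketch for respond, assuming it is importable as alexandra.util.respond (the path referenced by the reprompt docstring below): build a plain-text response that keeps the session open and carries a reprompt.
from alexandra.util import respond  # import path assumed from the docstring below

resp = respond(text='Hello!',
               reprompt_text='Are you still there?',
               attributes={'stage': 1},
               end_session=False)
assert resp['response']['outputSpeech'] == {'type': 'PlainText', 'text': 'Hello!'}
assert resp['response']['reprompt']['outputSpeech']['text'] == 'Are you still there?'
assert resp['sessionAttributes'] == {'stage': 1} |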
def reprompt(text=None, ssml=None, attributes=None):
"""Convenience method to save a little bit of typing for the common case of
reprompting the user. Simply calls :py:func:`alexandra.util.respond` with
the given arguments and holds the session open.
One of either the `text` or `ssml` should be provided if any
speech output is desired.
:param text: Plain text speech output
:param ssml: Speech output in SSML format
:param attributes: Dictionary of attributes to store in the current session
"""
return respond(
reprompt_text=text,
reprompt_ssml=ssml,
attributes=attributes,
end_session=False
) |
def validate_request_timestamp(req_body, max_diff=150):
"""Ensure the request's timestamp doesn't fall outside of the
app's specified tolerance.
Returns True if this request is valid, False otherwise.
:param req_body: JSON object parsed out of the raw POST data of a request.
:param max_diff: Maximum allowable difference in seconds between request
timestamp and system clock. Amazon requires <= 150 seconds for
published skills.
"""
time_str = req_body.get('request', {}).get('timestamp')
if not time_str:
log.error('timestamp not present %s', req_body)
return False
req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
diff = (datetime.utcnow() - req_ts).total_seconds()
if abs(diff) > max_diff:
log.error('timestamp difference too high: %d sec', diff)
return False
return True |
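A quick sketch of the check above: a request stamped with the current UTC time passes, while one from ten minutes ago exceeds the default 150-second tolerance. Assumes validate_request_timestamp and its module-level log are in scope.
from datetime import datetime, timedelta

fmt = "%Y-%m-%dT%H:%M:%SZ"
fresh = {'request': {'timestamp': datetime.utcnow().strftime(fmt)}}
stale = {'request': {'timestamp': (datetime.utcnow() - timedelta(minutes=10)).strftime(fmt)}}

assert validate_request_timestamp(fresh) is True
assert validate_request_timestamp(stale) is False |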
def validate_request_certificate(headers, data):
"""Ensure that the certificate and signature specified in the
    request headers are truly from Amazon and verify correctly.
Returns True if certificate verification succeeds, False otherwise.
:param headers: Dictionary (or sufficiently dictionary-like) map of request
headers.
:param data: Raw POST data attached to this request.
"""
# Make sure we have the appropriate headers.
if 'SignatureCertChainUrl' not in headers or \
'Signature' not in headers:
log.error('invalid request headers')
return False
cert_url = headers['SignatureCertChainUrl']
sig = base64.b64decode(headers['Signature'])
cert = _get_certificate(cert_url)
if not cert:
return False
try:
        # pyOpenSSL's verify() returns None on success and raises on failure
crypto.verify(cert, sig, data, 'sha1')
return True
    except Exception:
log.error('invalid request signature')
return False |
def _get_certificate(cert_url):
"""Download and validate a specified Amazon PEM file."""
global _cache
if cert_url in _cache:
cert = _cache[cert_url]
if cert.has_expired():
_cache = {}
else:
return cert
url = urlparse(cert_url)
host = url.netloc.lower()
path = posixpath.normpath(url.path)
# Sanity check location so we don't get some random person's cert.
if url.scheme != 'https' or \
host not in ['s3.amazonaws.com', 's3.amazonaws.com:443'] or \
not path.startswith('/echo.api/'):
log.error('invalid cert location %s', cert_url)
return
resp = urlopen(cert_url)
if resp.getcode() != 200:
log.error('failed to download certificate')
return
cert = crypto.load_certificate(crypto.FILETYPE_PEM, resp.read())
if cert.has_expired() or cert.get_subject().CN != 'echo-api.amazon.com':
log.error('certificate expired or invalid')
return
_cache[cert_url] = cert
return cert |
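The URL sanity check from _get_certificate, extracted standalone; the constants come from the code above, while the helper name and the urllib.parse import (Python 3) are illustrative assumptions.
import posixpath
from urllib.parse import urlparse

def cert_url_ok(cert_url):
    url = urlparse(cert_url)
    host = url.netloc.lower()
    path = posixpath.normpath(url.path)
    # only Amazon's echo.api path on s3.amazonaws.com over HTTPS is accepted
    return (url.scheme == 'https' and
            host in ('s3.amazonaws.com', 's3.amazonaws.com:443') and
            path.startswith('/echo.api/'))

assert cert_url_ok('https://s3.amazonaws.com/echo.api/echo-api-cert.pem')
assert not cert_url_ok('https://example.com/echo.api/cert.pem') |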
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
    :param db_versions: version records already stored in the database
"""
return self.number in (v.number for v in db_versions if v.date_done) |
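The membership test inside is_processed, standalone: a version counts as processed only when a database record with the same number has date_done set. DbVersion is a hypothetical stand-in for the real record objects.
from collections import namedtuple

DbVersion = namedtuple('DbVersion', 'number date_done')
db_versions = [DbVersion('1.0.0', '2019-01-01'), DbVersion('1.1.0', None)]

def processed(number):
    return number in (v.number for v in db_versions if v.date_done)

assert processed('1.0.0')       # date_done is set
assert not processed('1.1.0')   # started but never finished |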
def is_noop(self):
"""Check if version is a no operation version.
"""
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop |
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
"""
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode |
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
) |
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
    :param backup: whether to perform or skip the backup
    :type backup: bool
    :param mode: name of the mode in which the operation is executed
        (for now, backups are mode-independent)
    :type mode: str
"""
    # record whether a backup should be performed before this version runs
    self.backup = backup |
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations |
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations |
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade) |
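The set arithmetic at the heart of upgrade_addons_operation, standalone: addons already installed are scheduled for upgrade, the remainder for installation.
installed = {'base', 'sale'}
addons_list = {'sale', 'stock'}

to_install = addons_list - installed
to_upgrade = installed & addons_list
assert to_install == {'stock'}
assert to_upgrade == {'sale'} |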
def copy(self):
"""
get copy of object
:return: ReactionContainer
"""
return type(self)(reagents=[x.copy() for x in self.__reagents], meta=self.__meta.copy(),
products=[x.copy() for x in self.__products],
reactants=[x.copy() for x in self.__reactants]) |
def implicify_hydrogens(self):
"""
remove explicit hydrogens if possible
:return: number of removed hydrogens
"""
total = 0
for ml in (self.__reagents, self.__reactants, self.__products):
for m in ml:
if hasattr(m, 'implicify_hydrogens'):
total += m.implicify_hydrogens()
if total:
self.flush_cache()
return total |
def reset_query_marks(self):
"""
set or reset hyb and neighbors marks to atoms.
"""
for ml in (self.__reagents, self.__reactants, self.__products):
for m in ml:
if hasattr(m, 'reset_query_marks'):
m.reset_query_marks()
self.flush_cache() |
def compose(self):
"""
get CGR of reaction
reagents will be presented as unchanged molecules
:return: CGRContainer
"""
rr = self.__reagents + self.__reactants
if rr:
if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in rr):
raise TypeError('Queries not composable')
r = reduce(or_, rr)
else:
r = MoleculeContainer()
if self.__products:
if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in self.__products):
raise TypeError('Queries not composable')
p = reduce(or_, self.__products)
else:
p = MoleculeContainer()
return r ^ p |
def calculate2d(self, force=True):
"""
    recalculate 2d coordinates. ring coordinates may currently come out badly.
:param force: ignore existing coordinates of atoms
"""
for ml in (self.__reagents, self.__reactants, self.__products):
for m in ml:
m.calculate2d(force)
self.fix_positions() |
def fix_positions(self):
"""
fix coordinates of molecules in reaction
"""
shift_x = 0
for m in self.__reactants:
max_x = self.__fix_positions(m, shift_x, 0)
shift_x = max_x + 1
arrow_min = shift_x
if self.__reagents:
for m in self.__reagents:
max_x = self.__fix_positions(m, shift_x, 1.5)
shift_x = max_x + 1
else:
shift_x += 3
arrow_max = shift_x - 1
for m in self.__products:
max_x = self.__fix_positions(m, shift_x, 0)
shift_x = max_x + 1
self._arrow = (arrow_min, arrow_max)
self.flush_cache() |
def get_role_keys(cls, unit_key):
"""
:param unit_key: Parent unit key
:return: role keys of subunits
"""
stack = Role.objects.filter(unit_id=unit_key).values_list('key', flatten=True)
for unit_key in cls.objects.filter(parent_id=unit_key).values_list('key', flatten=True):
stack.extend(cls.get_role_keys(unit_key))
return stack |
def get_permissions(self):
"""
Permissions of the user.
Returns:
List of Permission objects.
"""
user_role = self.last_login_role() if self.last_login_role_key else self.role_set[0].role
return user_role.get_permissions() |
def get_permissions(self):
"""
    Finds the Permission objects belonging to the abstract role and
    returns their code values.
    Returns:
        list: Permission code values
"""
return [p.permission.code for p in self.Permissions if p.permission.code] |
def add_permission(self, perm):
"""
    Assigns a Permission object to the abstract role.
    Args:
        perm (object): Permission object to assign.
"""
self.Permissions(permission=perm)
PermissionCache.flush()
self.save() |
def add_permission_by_name(self, code, save=False):
"""
Adds a permission with given name.
Args:
code (str): Code name of the permission.
        save (bool): If False, only lists matching permissions without adding them.
"""
if not save:
return ["%s | %s" % (p.name, p.code) for p in
Permission.objects.filter(code__contains=code)]
    # materialize the queryset so an empty match can't cause a NameError below
    matched = list(Permission.objects.filter(code__contains=code))
    for p in matched:
        if p not in self.Permissions:
            self.Permissions(permission=p)
    if matched:
        self.save() |
def send_notification(self, title, message, typ=1, url=None, sender=None):
"""
    Sends a message to this role's user over their private MQ exchange
"""
self.user.send_notification(title=title, message=message, typ=typ, url=url,
sender=sender) |
def would_move_be_promotion(self):
"""
Finds if move from current location would be a promotion
"""
return (self._end_loc.rank == 0 and not self.color) or \
(self._end_loc.rank == 7 and self.color) |
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear() |
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
    If weak references are used, disconnect need not be called. The receiver
    will be removed from dispatch automatically.
Arguments:
receiver
            The registered receiver to disconnect. May be None if
            dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected |
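A hedged usage sketch of the connect/disconnect pair above; Signal stands for whatever dispatcher class these methods belong to, and the receiver accepts keyword arguments as the docstring requires.
sig = Signal()

def on_event(sender, **kwargs):
    print('got event from', sender)

sig.connect(on_event, dispatch_uid='on-event')      # registered once
sig.connect(on_event, dispatch_uid='on-event')      # same dispatch_uid: not added again
assert sig.disconnect(dispatch_uid='on-event')      # True: receiver removed
assert not sig.disconnect(dispatch_uid='on-event')  # already gone |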
def migrate(config):
"""Perform a migration according to config.
:param config: The configuration to be applied
:type config: Config
"""
webapp = WebApp(config.web_host, config.web_port,
custom_maintenance_file=config.web_custom_html)
webserver = WebServer(webapp)
webserver.daemon = True
webserver.start()
migration_parser = YamlParser.parse_from_file(config.migration_file)
migration = migration_parser.parse()
database = Database(config)
with database.connect() as lock_connection:
application_lock = ApplicationLock(lock_connection)
application_lock.start()
        while not application_lock.acquired:
            time.sleep(0.5)
        if application_lock.replica:
            # when a replica could finally acquire a lock, it
            # means that the concurrent process has finished the
            # migration or that it failed to run it.
            # In both cases, once the lock is released, this process will
            # verify whether anything is left for it to do (mainly if the
            # other process failed).
            application_lock.stop = True
            application_lock.join()
        # we are not in the replica or the lock is released: go on with the
        # migration
        try:
            table = MigrationTable(database)
            runner = Runner(config, migration, database, table)
            runner.perform()
        finally:
            application_lock.stop = True
            application_lock.join() |
def main():
"""Parse the command line and run :func:`migrate`."""
parser = get_args_parser()
args = parser.parse_args()
config = Config.from_parse_args(args)
migrate(config) |
def get_permissions(cls):
"""
    Generates permission codes for all CrudView-based class methods.
    Returns:
        List of permission code strings.
"""
perms = []
for kls_name, kls in cls.registry.items():
for method_name in cls.__dict__.keys():
if method_name.endswith('_view'):
perms.append("%s.%s" % (kls_name, method_name))
return perms |
def _get_object_menu_models():
"""
    Collects model names so that basic permissions are created
    only for CRUD-enabled models.
"""
from pyoko.conf import settings
enabled_models = []
for entry in settings.OBJECT_MENU.values():
for mdl in entry:
if 'wf' not in mdl:
enabled_models.append(mdl['name'])
return enabled_models |
def add(cls, code_name, name='', description=''):
"""
create a custom permission
"""
if code_name not in cls.registry:
cls.registry[code_name] = (code_name, name or code_name, description)
return code_name |
def get_mapping(self, other):
"""
get self to other mapping
"""
m = next(self._matcher(other).isomorphisms_iter(), None)
if m:
return {v: k for k, v in m.items()} |
def get_substructure_mapping(self, other, limit=1):
"""
get self to other substructure mapping
:param limit: number of matches. if 0 return iterator for all possible; if 1 return dict or None;
if > 1 return list of dicts
"""
i = self._matcher(other).subgraph_isomorphisms_iter()
if limit == 1:
m = next(i, None)
if m:
return {v: k for k, v in m.items()}
return
elif limit == 0:
return ({v: k for k, v in m.items()} for m in i)
return [{v: k for k, v in m.items()} for m in islice(i, limit)] |
def from_string(cls, alg_str):
"""
Creates a location from a two character string consisting of
the file then rank written in algebraic notation.
Examples: e4, b5, a7
    :type alg_str: str
:rtype: Location
"""
try:
return cls(int(alg_str[1]) - 1, ord(alg_str[0]) - 97)
except ValueError as e:
raise ValueError("Location.from_string {} invalid: {}".format(alg_str, e)) |
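The algebraic-notation arithmetic from from_string, shown standalone: the file letter maps to a 0-based column via ord(c) - 97 and the rank digit to a 0-based row.
alg_str = 'e4'
rank = int(alg_str[1]) - 1    # '4' -> 3
file = ord(alg_str[0]) - 97   # 'e' -> 4
assert (rank, file) == (3, 4) |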
def shift(self, direction):
"""
Shifts in direction provided by ``Direction`` enum.
:type: direction: Direction
:rtype: Location
"""
try:
if direction == Direction.UP:
return self.shift_up()
elif direction == Direction.DOWN:
return self.shift_down()
elif direction == Direction.RIGHT:
return self.shift_right()
elif direction == Direction.LEFT:
return self.shift_left()
else:
raise IndexError("Invalid direction {}".format(direction))
except IndexError as e:
raise IndexError(e) |
def shift_up(self, times=1):
"""
Finds Location shifted up by 1
:rtype: Location
"""
try:
return Location(self._rank + times, self._file)
except IndexError as e:
raise IndexError(e) |
def shift_down(self, times=1):
"""
Finds Location shifted down by 1
:rtype: Location
"""
try:
return Location(self._rank - times, self._file)
except IndexError as e:
raise IndexError(e) |
def shift_right(self, times=1):
"""
Finds Location shifted right by 1
:rtype: Location
"""
try:
return Location(self._rank, self._file + times)
except IndexError as e:
raise IndexError(e) |
def shift_left(self, times=1):
"""
Finds Location shifted left by 1
:rtype: Location
"""
try:
return Location(self._rank, self._file - times)
except IndexError as e:
raise IndexError(e) |
def shift_up_right(self, times=1):
"""
Finds Location shifted up right by 1
:rtype: Location
"""
try:
return Location(self._rank + times, self._file + times)
except IndexError as e:
raise IndexError(e) |
def shift_up_left(self, times=1):
"""
Finds Location shifted up left by 1
:rtype: Location
"""
try:
return Location(self._rank + times, self._file - times)
except IndexError as e:
raise IndexError(e) |
def shift_down_right(self, times=1):
"""
Finds Location shifted down right by 1
:rtype: Location
"""
try:
return Location(self._rank - times, self._file + times)
except IndexError as e:
raise IndexError(e) |
def shift_down_left(self, times=1):
"""
Finds Location shifted down left by 1
:rtype: Location
"""
try:
return Location(self._rank - times, self._file - times)
except IndexError as e:
raise IndexError(e) |
def standardize(self):
"""
standardize functional groups
:return: number of found groups
"""
self.reset_query_marks()
seen = set()
total = 0
for n, atom in self.atoms():
if n in seen:
continue
for k, center in central.items():
if center != atom:
continue
shell = tuple((bond, self._node[m]) for m, bond in self._adj[n].items())
for shell_query, shell_patch, atom_patch in query_patch[k]:
if shell_query != shell:
continue
total += 1
for attr_name, attr_value in atom_patch.items():
setattr(atom, attr_name, attr_value)
for (bond_patch, atom_patch), (bond, atom) in zip(shell_patch, shell):
bond.update(bond_patch)
for attr_name, attr_value in atom_patch.items():
setattr(atom, attr_name, attr_value)
seen.add(n)
seen.update(self._adj[n])
break
else:
continue
break
if total:
self.flush_cache()
return total |
def get_staged_files():
"""Get all files staged for the current commit.
"""
proc = subprocess.Popen(('git', 'status', '--porcelain'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, _ = proc.communicate()
staged_files = modified_re.findall(out)
return staged_files |
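A sketch of the parsing step above, under the assumption that modified_re is a bytes pattern matching added/modified entries in --porcelain output (Popen without text=True yields bytes); the exact pattern shown is illustrative.
import re

out = b"A  bar.py\nM  baz.py\n?? untracked.txt\n"
modified_re = re.compile(rb'^[AM]+\s+(?P<name>.*)', re.MULTILINE)
assert modified_re.findall(out) == [b'bar.py', b'baz.py'] |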
def runserver(host=None, port=None):
"""
Run Tornado server
"""
host = host or os.getenv('HTTP_HOST', '0.0.0.0')
port = port or os.getenv('HTTP_PORT', '9001')
zioloop = ioloop.IOLoop.instance()
# setup pika client:
pc = QueueManager(zioloop)
app.pc = pc
pc.connect()
app.listen(port, host)
zioloop.start() |
def open(self):
"""
Called on new websocket connection.
"""
    sess_id = self._get_sess_id()
    if sess_id:
        self.application.pc.websockets[sess_id] = self
        self.write_message(json.dumps({"cmd": "status", "status": "open"}))
    else:
        self.write_message(json.dumps({"cmd": "error", "error": "Please login", "code": 401})) |
def on_message(self, message):
"""
    Called on new websocket message.
"""
log.debug("WS MSG for %s: %s" % (self._get_sess_id(), message))
self.application.pc.redirect_incoming_message(self._get_sess_id(), message, self.request) |
def _handle_headers(self):
"""
    Set CORS and content-type headers on the response.
"""
origin = self.request.headers.get('Origin')
if not settings.DEBUG:
if origin in settings.ALLOWED_ORIGINS or not origin:
self.set_header('Access-Control-Allow-Origin', origin)
else:
log.debug("CORS ERROR: %s not allowed, allowed hosts: %s" % (origin,
settings.ALLOWED_ORIGINS))
raise HTTPError(403, "Origin not in ALLOWED_ORIGINS: %s" % origin)
else:
self.set_header('Access-Control-Allow-Origin', origin or '*')
self.set_header('Access-Control-Allow-Credentials', "true")
self.set_header('Access-Control-Allow-Headers', 'Content-Type')
self.set_header('Access-Control-Allow-Methods', 'OPTIONS')
self.set_header('Content-Type', 'application/json') |
def post(self, view_name):
"""
login handler
"""
sess_id = None
input_data = {}
self._handle_headers()
# handle input
input_data = json_decode(self.request.body) if self.request.body else {}
input_data['path'] = view_name
# set or get session cookie
if not self.get_cookie(COOKIE_NAME) or 'username' in input_data:
sess_id = uuid4().hex
self.set_cookie(COOKIE_NAME, sess_id) # , domain='127.0.0.1'
else:
sess_id = self.get_cookie(COOKIE_NAME)
# h_sess_id = "HTTP_%s" % sess_id
input_data = {'data': input_data,
'_zops_remote_ip': self.request.remote_ip}
log.info("New Request for %s: %s" % (sess_id, input_data))
self.application.pc.register_websocket(sess_id, self)
self.application.pc.redirect_incoming_message(sess_id,
json_encode(input_data),
self.request) |
def load_formatter_fn(formatter):
'''
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
<function basescript at 0x...>
'''
obj = util.load_object(formatter)
if not hasattr(obj, 'ispartial'):
obj.ispartial = util.ispartial
return obj |
def _remove_redundancy(self, log):
"""Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
"""
for key in log:
if key in log and key in log['data']:
log[key] = log['data'].pop(key)
return log |
def validate_log_format(self, log):
'''
>>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30)
>>> incomplete_log = {'data' : {'x' : 1, 'y' : 2},
... 'raw' : 'Not all keys present'}
>>> lc.validate_log_format(incomplete_log)
'failed'
>>> redundant_log = {'one_invalid_key' : 'Extra information',
... 'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(redundant_log)
'failed'
>>> correct_log = {'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(correct_log)
'passed'
'''
    keys_in_log = set(log)
    keys_in_log_structure = set(self.LOG_STRUCTURE)
    # reject logs whose key set differs from the expected structure
    if keys_in_log != keys_in_log_structure:
        self.log.warning('formatted_log_structure_rejected',
                         key_not_found=list(keys_in_log_structure - keys_in_log),
                         extra_keys_found=list(keys_in_log - keys_in_log_structure),
                         num_logs=1,
                         type='metric')
        return 'failed'
    # reject logs whose values have unexpected datatypes;
    # LOG_STRUCTURE values are already types, so they are passed directly
    for key in log:
        if not isinstance(log[key], self.LOG_STRUCTURE[key]):
            self.log.warning('formatted_log_structure_rejected',
                             key_datatype_not_matched=key,
                             datatype_expected=self.LOG_STRUCTURE[key],
                             datatype_got=type(log[key]),
                             num_logs=1,
                             type='metric')
            return 'failed'
    return 'passed' |
def assign_default_log_values(self, fpath, line, formatter):
'''
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'}
'''
return dict(
id=None,
file=fpath,
host=self.HOST,
formatter=formatter,
event='event',
data={},
raw=line,
timestamp=datetime.datetime.utcnow().isoformat(),
type='log',
level='debug',
        error=False,
error_tb='',
) |
def _scan_fpatterns(self, state):
'''
For a list of given fpatterns, this starts a thread
collecting log lines from file
>>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> print(lc.fpaths)
file=/path/to/log_file.log:formatter=logagg.formatters.basescript
>>> print('formatters loaded:', lc.formatters)
{}
>>> print('log file reader threads started:', lc.log_reader_threads)
{}
>>> state = AttrDict(files_tracked=list())
    >>> print('files being tracked:', state.files_tracked)
    []
    >>> if not state.files_tracked:
    ...     lc._scan_fpatterns(state)
    >>> print('formatters loaded:', lc.formatters)
    >>> print('log file reader threads started:', lc.log_reader_threads)
    >>> print('files being tracked:', state.files_tracked)
'''
for f in self.fpaths:
        fpattern, formatter = (a.split('=')[1] for a in f.split(':', 1))
self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter)
# TODO code for scanning fpatterns for the files not yet present goes here
fpaths = glob.glob(fpattern)
# Load formatter_fn if not in list
fpaths = list(set(fpaths) - set(state.files_tracked))
for fpath in fpaths:
try:
formatter_fn = self.formatters.get(formatter,
load_formatter_fn(formatter))
self.log.info('found_formatter_fn', fn=formatter)
self.formatters[formatter] = formatter_fn
except (SystemExit, KeyboardInterrupt): raise
except (ImportError, AttributeError):
self.log.exception('formatter_fn_not_found', fn=formatter)
sys.exit(-1)
# Start a thread for every file
self.log.info('found_log_file', log_file=fpath)
log_f = dict(fpath=fpath, fpattern=fpattern,
formatter=formatter, formatter_fn=formatter_fn)
log_key = (fpath, fpattern, formatter)
if log_key not in self.log_reader_threads:
self.log.info('starting_collect_log_lines_thread', log_key=log_key)
# There is no existing thread tracking this log file. Start one
log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,))
self.log_reader_threads[log_key] = log_reader_thread
state.files_tracked.append(fpath)
time.sleep(self.SCAN_FPATTERNS_INTERVAL) |
def get_links(self, **kw):
"""
    Prepare links of form by mimicking pyoko's get_links method's result
Args:
**kw:
Returns: list of link dicts
"""
links = [a for a in dir(self) if isinstance(getattr(self, a), Model)
and not a.startswith('_model')]
return [
{
'field': l,
'mdl': getattr(self, l).__class__,
} for l in links
] |
def set_data(self, data):
"""
Fills form with data
Args:
data (dict): Data to assign form fields.
Returns:
Self. Form object.
"""
for name in self._fields:
setattr(self, name, data.get(name))
return self |
def serialize(self):
"""
Converts the form/model into JSON ready dicts/lists compatible
with `Ulakbus-UI API`_.
Example:
.. code-block:: json
{
"forms": {
"constraints": {},
"model": {
"code": null,
"name": null,
"save_edit": null,
},
"grouping": {},
"form": [
{
"helpvalue": null,
"type": "help"
},
"name",
"code",
"save_edit"
],
"schema": {
"required": [
"name",
"code",
"save_edit"
],
"type": "object",
"properties": {
"code": {
"type": "string",
"title": "Code Name"
},
"name": {
"type": "string",
"title": "Name"
},
"save_edit": {
"cmd": "save::add_edit_form",
"type": "button",
"title": "Save"
}
},
"title": "Add Permission"
}
}
}
"""
result = {
"schema": {
"title": self.title,
"type": "object",
"properties": {},
"required": []
},
"form": [
{
"type": "help",
"helpvalue": self.help_text
}
],
"model": {}
}
for itm in self.META_TO_FORM_ROOT:
if itm in self.Meta.__dict__:
result[itm] = self.Meta.__dict__[itm]
if self._model.is_in_db():
result["model"]['object_key'] = self._model.key
result["model"]['model_type'] = self._model.__class__.__name__
result["model"]['unicode'] = six.text_type(self._model)
        # If the form is intentionally marked as fillable from task data (by
        # assigning False to the always_blank field in the Meta class), then
        # form_data is retrieved from task_data if present, else None
form_data = None
if not self.Meta.always_blank:
form_data = self.context.task_data.get(self.__class__.__name__, None)
for itm in self._serialize():
item_props = {'type': itm['type'], 'title': itm['title']}
if not itm.get('value') and 'kwargs' in itm and 'value' in itm['kwargs']:
itm['value'] = itm['kwargs'].pop('value')
if 'kwargs' in itm and 'widget' in itm['kwargs']:
item_props['widget'] = itm['kwargs'].pop('widget')
if form_data:
if form_data[itm['name']] and (itm['type'] == 'date' or itm['type'] == 'datetime'):
value_to_serialize = datetime.strptime(
form_data[itm['name']], itm['format'])
else:
value_to_serialize = form_data[itm['name']]
value = self._serialize_value(value_to_serialize)
if itm['type'] == 'button':
value = None
# if form_data is empty, value will be None, so it is needed to fill the form from model
# or leave empty
else:
# if itm['value'] is not None returns itm['value']
# else itm['default']
if itm['value'] is not None:
value = itm['value']
else:
value = itm['default']
result["model"][itm['name']] = value
if itm['type'] == 'model':
item_props['model_name'] = itm['model_name']
if itm['type'] not in ['ListNode', 'model', 'Node']:
if 'hidden' in itm['kwargs']:
# we're simulating HTML's hidden form fields
# by just setting it in "model" dict and bypassing other parts
continue
else:
item_props.update(itm['kwargs'])
if itm.get('choices'):
self._handle_choices(itm, item_props, result)
else:
result["form"].append(itm['name'])
if 'help_text' in itm:
item_props['help_text'] = itm['help_text']
if 'schema' in itm:
item_props['schema'] = itm['schema']
# this adds default directives for building
# add and list views of linked models
if item_props['type'] == 'model':
                # This check exists to keep tests passing: the object
                # receives a context but may not use it.
if self.context:
if self.context.has_permission("%s.select_list" % item_props['model_name']):
item_props.update({
'list_cmd': 'select_list',
'wf': 'crud',
})
if self.context.has_permission("%s.add_edit_form" % item_props['model_name']):
item_props.update({
'add_cmd': 'add_edit_form',
'wf': 'crud',
})
else:
item_props.update({
'list_cmd': 'select_list',
'add_cmd': 'add_edit_form',
'wf': 'crud'
})
result["schema"]["properties"][itm['name']] = item_props
if itm['required']:
result["schema"]["required"].append(itm['name'])
self._cache_form_details(result)
return result |
def _cache_form_details(self, form):
"""
    Caches some form details to later process and validate incoming (response) form data
Args:
form: form dict
"""
cache = FormCache()
form['model']['form_key'] = cache.form_id
form['model']['form_name'] = self.__class__.__name__
cache.set(
{
'model': list(form['model'].keys()), # In Python 3, dictionary keys are not serializable
'non_data_fields': self.non_data_fields
}
) |
def _parse_msg_for_mongodb(self, msgs):
'''
>>> mdbf = MongoDBForwarder('no_host', '27017', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = [{u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> records = mdbf._parse_msg_for_mongodb(log)
>>> from pprint import pprint
>>> pprint(records)
[{'_id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
u'data': {u'_': {u'file': u'log.py',
u'fn': u'start',
u'ln': 8,
u'name': u'__main__'},
u'a': 1,
u'b': 2,
u'msg': u'this is a dummy log'},
u'error': False,
u'error_tb': u'',
u'event': u'some_log',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info',
u'raw': u'{...}',
u'timestamp': u'2018-04-09T09:59:24.733945Z',
u'type': u'metric'}]
'''
msgs_list = []
for msg in msgs:
try:
msg['_id'] = msg.pop('id')
except KeyError:
self.log.exception('collector_failure_id_not_found', log=msg)
msgs_list.append(msg)
return msgs_list |
def _tag_and_field_maker(self, event):
'''
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = {u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'__ignore_this': 'some_string',
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}
>>> tags, fields = idbf._tag_and_field_maker(log)
>>> from pprint import pprint
>>> pprint(tags)
{u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'}
>>> pprint(fields)
{u'data._': "{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}",
u'data.a': 1,
u'data.b': 2}
'''
data = event.pop('data')
data = flatten_dict({'data': data})
t = dict((k, event[k]) for k in event if k not in self.EXCLUDE_TAGS)
f = dict()
for k in data:
v = data[k]
if is_number(v) or isinstance(v, MarkValue):
f[k] = v
else:
t[k] = v
return t, f |
def _parse_msg_for_influxdb(self, msgs):
'''
>>> from logagg.forwarders import InfluxDBForwarder
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd',
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> pointvalues = idbf._parse_msg_for_influxdb(valid_log)
>>> from pprint import pprint
>>> pprint(pointvalues)
[{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'",
u'data.a': 1,
u'data.b': 2},
'measurement': u'some_log',
'tags': {u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'},
'time': u'2018-04-09T09:59:24.733945Z'}]
>>> invalid_log = valid_log
>>> invalid_log[0]['error'] = True
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
>>> invalid_log = valid_log
>>> invalid_log[0]['type'] = 'log'
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
'''
series = []
for msg in msgs:
if msg.get('error'):
continue
        if msg.get('type', '').lower() == 'metric':
time = msg.get('timestamp')
measurement = msg.get('event')
tags, fields = self._tag_and_field_maker(msg)
pointvalues = {
"time": time,
"measurement": measurement,
"fields": fields,
"tags": tags}
series.append(pointvalues)
return series |
def get_input(source, files, threads=4, readtype="1D",
combine="simple", names=None, barcoded=False):
"""Get input and process accordingly.
Data can be:
    - an uncompressed, bgzip, bzip2 or gzip compressed fastq file
    - an uncompressed, bgzip, bzip2 or gzip compressed fasta file
    - a rich fastq containing additional key=value information in the description,
      as produced by MinKNOW and albacore, with the same compression options as above
    - a sorted bam file
    - a sorted cram file
    - a (compressed) sequencing_summary.txt file generated by albacore
    The handle is passed to the proper function to get a DataFrame with metrics.
    Info can be extracted from multiple files of the same type in parallel.
    Arguments:
    - source: defines the input data type and the function that needs to be called
    - files: a list of one or more files of type <source> to operate on
    - threads: the number of worker processes that can be used
    - readtype: (only relevant for summary input) specifies which columns have to be extracted
    - combine: either 'simple' or 'track'; with 'track' an additional
      field is created containing the name of the dataset
    - names: if combine="track", the names to be used for the datasets.
      Needs to have the same length as files, or be None
"""
proc_functions = {
'fastq': ex.process_fastq_plain,
'fasta': ex.process_fasta,
'bam': ex.process_bam,
'summary': ex.process_summary,
'fastq_rich': ex.process_fastq_rich,
'fastq_minimal': ex.process_fastq_minimal,
'cram': ex.process_cram,
'ubam': ex.process_ubam, }
filethreads = min(len(files), threads)
threadsleft = threads - filethreads
with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor:
        extraction_function = partial(proc_functions[source],
                                      threads=threadsleft,
                                      readtype=readtype,
                                      barcoded=barcoded)
        datadf = combine_dfs(
            dfs=[out for out in executor.map(extraction_function, files)],
names=names or files,
method=combine)
if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any():
datadf.drop("readIDs", axis='columns', inplace=True)
datadf = calculate_start_time(datadf)
logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf)))
if len(datadf) == 0:
        logging.critical("Nanoget: no reads retrieved.")
sys.exit("Fatal: No reads found in input.")
else:
return datadf |
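A hedged usage sketch of get_input (the import path and file names are illustrative, not
taken from this snippet):

    from nanoget import get_input  # assumed module layout

    df = get_input(source="fastq",
                   files=["run1.fastq.gz", "run2.fastq.gz"],  # hypothetical input files
                   threads=8,
                   combine="track",
                   names=["run1", "run2"])
    # df is one DataFrame with a row per read and a 'dataset' column tracking the source file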
Combine dataframes.
Combination is either done simply by concatenating the DataFrames,
or with tracking by adding the name of the dataset as a column.
def combine_dfs(dfs, names, method):
"""Combine dataframes.
    Combination is either done simply by concatenating the DataFrames,
    or with tracking by adding the name of the dataset as a column."""
if method == "track":
res = list()
for df, identifier in zip(dfs, names):
df["dataset"] = identifier
res.append(df)
return pd.concat(res, ignore_index=True)
elif method == "simple":
return pd.concat(dfs, ignore_index=True) |
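A minimal sketch of the 'track' behaviour, assuming only pandas is available:

    import pandas as pd

    dfs = [pd.DataFrame({"lengths": [100, 200]}),
           pd.DataFrame({"lengths": [300]})]
    combined = combine_dfs(dfs, names=["run1", "run2"], method="track")
    print(combined["dataset"].tolist())  # ['run1', 'run1', 'run2']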
Calculate the start_time per read.
Time data is either
a "time" (in seconds, derived from summary files) or
a "timestamp" (in UTC, derived from the fastq_rich format)
and has to be converted appropriately into a datetime column time_arr.
In both cases time_zero is the minimal value of time_arr,
which is then subtracted from all other times.
In the case of method=track (i.e. dataset is a column in the df),
this subtraction is done per dataset.
def calculate_start_time(df):
"""Calculate the star_time per read.
Time data is either
a "time" (in seconds, derived from summary files) or
a "timestamp" (in UTC, derived from fastq_rich format)
and has to be converted appropriately in a datetime format time_arr
For both the time_zero is the minimal value of the time_arr,
which is then used to subtract from all other times
In the case of method=track (and dataset is a column in the df) then this
subtraction is done per dataset
"""
if "time" in df:
df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]')
elif "timestamp" in df:
df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
else:
return df
if "dataset" in df:
for dset in df["dataset"].unique():
time_zero = df.loc[df["dataset"] == dset, "time_arr"].min()
df.loc[df["dataset"] == dset, "start_time"] = \
df.loc[df["dataset"] == dset, "time_arr"] - time_zero
else:
df["start_time"] = df["time_arr"] - df["time_arr"].min()
return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore") |
Construct YamlParser from a file pointer.
def parser_from_buffer(cls, fp):
"""Construct YamlParser from a file pointer."""
yaml = YAML(typ="safe")
return cls(yaml.load(fp)) |
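A hedged usage sketch (the YamlParser class name and the YAML content are assumptions for
illustration):

    import io

    fp = io.StringIO("migration:\n  options: {}\n  versions: []\n")
    parser = YamlParser.parser_from_buffer(fp)  # assuming it is exposed as a classmethod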
Check that we don't have unknown keys in a dictionary.
It does not raise an error if we have less keys than expected.
def check_dict_expected_keys(self, expected_keys, current, dict_name):
""" Check that we don't have unknown keys in a dictionary.
It does not raise an error if we have less keys than expected.
"""
if not isinstance(current, dict):
raise ParseError(u"'{}' key must be a dict".format(dict_name),
YAML_EXAMPLE)
expected_keys = set(expected_keys)
        current_keys = set(current)
extra_keys = current_keys - expected_keys
if extra_keys:
message = u"{}: the keys {} are unexpected. (allowed keys: {})"
raise ParseError(
message.format(
dict_name,
list(extra_keys),
list(expected_keys),
),
YAML_EXAMPLE,
) |
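The check itself is plain set arithmetic; a sketch of the failing case:

    expected_keys = {"options", "versions"}
    current = {"options": {}, "verions": []}   # note the misspelled key
    extra_keys = set(current) - expected_keys  # {'verions'} -> ParseError is raised above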
Check input and return a :class:`Migration` instance.
def parse(self):
"""Check input and return a :class:`Migration` instance."""
if not self.parsed.get('migration'):
raise ParseError(u"'migration' key is missing", YAML_EXAMPLE)
self.check_dict_expected_keys(
{'options', 'versions'}, self.parsed['migration'], 'migration',
)
return self._parse_migrations() |
Build a :class:`Migration` instance.
def _parse_migrations(self):
"""Build a :class:`Migration` instance."""
migration = self.parsed['migration']
options = self._parse_options(migration)
versions = self._parse_versions(migration, options)
return Migration(versions, options) |
Build :class:`MigrationOption` and
:class:`MigrationBackupOption` instances.
def _parse_options(self, migration):
"""Build :class:`MigrationOption` and
:class:`MigrationBackupOption` instances."""
options = migration.get('options', {})
install_command = options.get('install_command')
backup = options.get('backup')
if backup:
self.check_dict_expected_keys(
{'command', 'ignore_if', 'stop_on_failure'},
options['backup'], 'backup',
)
backup = MigrationBackupOption(
command=backup.get('command'),
ignore_if=backup.get('ignore_if'),
stop_on_failure=backup.get('stop_on_failure', True),
)
return MigrationOption(
install_command=install_command,
backup=backup,
) |
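A hedged example of an options mapping this parser accepts (all commands and values are
illustrative):

    migration = {
        "options": {
            "install_command": "pip install -e .",       # hypothetical command
            "backup": {
                "command": "pg_dump mydb > backup.sql",  # hypothetical command
                "ignore_if": "test -f .skip-backup",
                "stop_on_failure": False,
            },
        },
    }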
Sets user notification message.
Args:
title: Msg. title
msg: Msg. text
typ: Msg. type
url: Additional URL (if exists)
Returns:
Message ID.
def set_message(self, title, msg, typ, url=None):
"""
Sets user notification message.
Args:
title: Msg. title
msg: Msg. text
typ: Msg. type
url: Additional URL (if exists)
Returns:
Message ID.
"""
return self.user.send_notification(title=title,
message=msg,
typ=typ,
url=url) |
A property that indicates if current user is logged in or not.
Returns:
Boolean.
def is_auth(self):
"""
A property that indicates if current user is logged in or not.
Returns:
Boolean.
"""
if self.user_id is None:
self.user_id = self.session.get('user_id')
return bool(self.user_id) |
Checks if current user (or role) has the given permission.
Args:
perm: Permission code or object.
Depends on the :attr:`~zengine.auth.auth_backend.AuthBackend` implementation.
Returns:
Boolean.
def has_permission(self, perm):
"""
Checks if current user (or role) has the given permission.
Args:
        perm: Permission code or object.
Depends on the :attr:`~zengine.auth.auth_backend.AuthBackend` implementation.
Returns:
Boolean.
"""
return self.user.superuser or self.auth.has_permission(perm) |
Create a message box
:param str msg:
:param str title:
:param str typ: 'info', 'error', 'warning'
def msg_box(self, msg, title=None, typ='info'):
"""
Create a message box
:param str msg:
:param str title:
:param str typ: 'info', 'error', 'warning'
"""
self.output['msgbox'] = {'type': typ, "title": title or msg[:20], "msg": msg} |
Tell the current user that they have finished their job for now.
We'll notify them if the workflow arrives at their WF lane again.
def sendoff_current_user(self):
"""
        Tell the current user that they have finished their job for now.
        We'll notify them if the workflow arrives at their WF lane again.
"""
msgs = self.task_data.get('LANE_CHANGE_MSG', DEFAULT_LANE_CHANGE_MSG)
self.msg_box(title=msgs['title'], msg=msgs['body']) |
Invites the next lane's (possible) owner(s) to participate
def invite_other_parties(self, possible_owners):
"""
Invites the next lane's (possible) owner(s) to participate
"""
signals.lane_user_change.send(sender=self.user,
current=self,
old_lane=self.old_lane,
possible_owners=possible_owners
) |
Assigns current task step to self.task
then updates the task's data with self.task_data
Args:
task: Task object.
def _update_task(self, task):
"""
Assigns current task step to self.task
then updates the task's data with self.task_data
Args:
task: Task object.
"""
self.task = task
self.task.data.update(self.task_data)
self.task_type = task.task_spec.__class__.__name__
self.spec = task.task_spec
self.task_name = task.get_name()
self.activity = getattr(self.spec, 'service_class', '')
self._set_lane_data() |
This method is automatically called on each request and
updates the "object_id", "cmd" and "flow" client variables
from current.input.
The "flow" and "object_id" variables will always exist in the
task_data, so app developers can safely check for their
values in workflows.
Their values are reset to None if they do not exist
in the current input data set.
Likewise, if there is no "cmd" in current.input,
task_data['cmd'] is reset to None.
def set_client_cmds(self):
"""
        This method is automatically called on each request and
        updates the "object_id", "cmd" and "flow" client variables
        from current.input.
        The "flow" and "object_id" variables will always exist in the
        task_data, so app developers can safely check for their
        values in workflows.
        Their values are reset to None if they do not exist
        in the current input data set.
        Likewise, if there is no "cmd" in current.input,
        task_data['cmd'] is reset to None.
"""
self.task_data['cmd'] = self.input.get('cmd')
self.task_data['flow'] = self.input.get('flow')
filters = self.input.get('filters', {})
try:
if isinstance(filters, dict):
                # this is the new form; the others will be removed when the UI is ready
self.task_data['object_id'] = filters.get('object_id')['values'][0]
elif filters[0]['field'] == 'object_id':
self.task_data['object_id'] = filters[0]['values'][0]
        except (TypeError, KeyError, IndexError):
if 'object_id' in self.input:
self.task_data['object_id'] = self.input.get('object_id') |
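The two filter payload shapes handled above, as a sketch (values are illustrative):

    new_style = {"filters": {"object_id": {"values": ["abc123"]}}}
    old_style = {"filters": [{"field": "object_id", "values": ["abc123"]}]}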
Returns valid and legal move given position
:type: position: Board
:rtype: Move
def generate_move(self, position):
"""
Returns valid and legal move given position
:type: position: Board
:rtype: Move
"""
while True:
print(position)
raw = input(str(self.color) + "\'s move \n")
move = converter.short_alg(raw, self.color, position)
if move is None:
continue
return move |
Finds if playing my move would make both kings meet.
:type: pos: Board
:type: move: Move
:rtype: bool
def in_check_as_result(self, pos, move):
"""
Finds if playing my move would make both kings meet.
:type: pos: Board
:type: move: Move
:rtype: bool
"""
test = cp(pos)
test.update(move)
test_king = test.get_king(move.color)
return self.loc_adjacent_to_opponent_king(test_king.location, test) |
Finds if 2 kings are touching given the position of one of the kings.
:type: location: Location
:type: position: Board
:rtype: bool
def loc_adjacent_to_opponent_king(self, location, position):
"""
Finds if 2 kings are touching given the position of one of the kings.
:type: location: Location
:type: position: Board
:rtype: bool
"""
for fn in self.cardinal_directions:
try:
if isinstance(position.piece_at_square(fn(location)), King) and \
position.piece_at_square(fn(location)).color != self.color:
return True
except IndexError:
pass
return False |
Adds all 8 cardinal directions as moves for the King if legal.
:type: func: function
:type: position: Board
:rtype: gen
def add(self, func, position):
"""
Adds all 8 cardinal directions as moves for the King if legal.
        :type: func: function
:type: position: Board
:rtype: gen
"""
try:
if self.loc_adjacent_to_opponent_king(func(self.location), position):
return
except IndexError:
return
if position.is_square_empty(func(self.location)):
yield self.create_move(func(self.location), notation_const.MOVEMENT)
elif position.piece_at_square(func(self.location)).color != self.color:
yield self.create_move(func(self.location), notation_const.CAPTURE) |
Decides if given rook exists, is of this color, and has not moved so it
is eligible to castle.
:type: rook: Rook
:rtype: bool
def _rook_legal_for_castle(self, rook):
"""
Decides if given rook exists, is of this color, and has not moved so it
is eligible to castle.
:type: rook: Rook
:rtype: bool
"""
return rook is not None and \
type(rook) is Rook and \
rook.color == self.color and \
not rook.has_moved |
Checks if set of squares in between ``King`` and ``Rook`` are empty and safe
for the king to castle.
:type: position: Board
:type: direction: function
:rtype: bool
def _empty_not_in_check(self, position, direction):
"""
Checks if set of squares in between ``King`` and ``Rook`` are empty and safe
for the king to castle.
        :type: position: Board
        :type: direction: function
:rtype: bool
"""
def valid_square(square):
return position.is_square_empty(square) and \
not self.in_check(position, square)
return valid_square(direction(self.location, 1)) and \
valid_square(direction(self.location, 2)) |
Adds kingside and queenside castling moves if legal
:type: position: Board
def add_castle(self, position):
"""
Adds kingside and queenside castling moves if legal
:type: position: Board
"""
if self.has_moved or self.in_check(position):
return
if self.color == color.white:
rook_rank = 0
else:
rook_rank = 7
castle_type = {
notation_const.KING_SIDE_CASTLE: {
"rook_file": 7,
"direction": lambda king_square, times: king_square.shift_right(times)
},
notation_const.QUEEN_SIDE_CASTLE: {
"rook_file": 0,
"direction": lambda king_square, times: king_square.shift_left(times)
}
}
for castle_key in castle_type:
castle_dict = castle_type[castle_key]
castle_rook = position.piece_at_square(Location(rook_rank, castle_dict["rook_file"]))
if self._rook_legal_for_castle(castle_rook) and \
self._empty_not_in_check(position, castle_dict["direction"]):
yield self.create_move(castle_dict["direction"](self.location, 2), castle_key) |
Generates list of possible moves
:type: position: Board
:rtype: list
def possible_moves(self, position):
"""
Generates list of possible moves
:type: position: Board
:rtype: list
"""
# Chain used to combine multiple generators
for move in itertools.chain(*[self.add(fn, position) for fn in self.cardinal_directions]):
yield move
for move in self.add_castle(position):
yield move |
Finds if the king is in check or if both kings are touching.
:type: position: Board
:rtype: bool
def in_check(self, position, location=None):
"""
Finds if the king is in check or if both kings are touching.
:type: position: Board
        :rtype: bool
"""
location = location or self.location
for piece in position:
if piece is not None and piece.color != self.color:
if not isinstance(piece, King):
for move in piece.possible_moves(position):
if move.end_loc == location:
return True
else:
if self.loc_adjacent_to_opponent_king(piece.location, position):
return True
return False |
Sets the keep-alive setting for the peer socket.
:param sock: Socket to be configured.
:param idle: Interval in seconds of connection idleness after which keep-alive probes
start being sent.
:param interval: Interval in seconds between probes.
:param fails: Maximum number of failed probes.
def set_keep_alive(sock, idle=10, interval=5, fails=5):
"""Sets the keep-alive setting for the peer socket.
:param sock: Socket to be configured.
    :param idle: Interval in seconds of connection idleness after which keep-alive probes
        start being sent.
:param interval: Interval in seconds between probes.
:param fails: Maximum number of failed probes.
"""
import sys
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if sys.platform in ('linux', 'linux2'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, fails)
elif sys.platform == 'darwin':
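        # 0x10 is TCP_KEEPALIVE on macOS; older Python versions do not expose it as a constant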
sock.setsockopt(socket.IPPROTO_TCP, 0x10, interval)
else:
        # No platform-specific tuning here; SO_KEEPALIVE set above still applies.
pass |
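A minimal usage sketch (the endpoint is illustrative):

    import socket

    sock = socket.create_connection(("example.com", 80))  # hypothetical peer
    set_keep_alive(sock, idle=30, interval=10, fails=3)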